| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-299 | stringclasses (990 values) | stringlengths 4-7 | stringlengths 666-1.03M | stringclasses (15 values) | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17-100 | int64 7-1k | float64 0.25-0.98 | bool (1 class) |
stuartsierra/password-store | contrib/importers/kedpm2pass.py | 42 | 1568 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Antoine Beaupré <[email protected]>. All Rights Reserved.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
#
# To double-check your import worked:
# grep Path passwords | sed 's#^Path: ##;s/$/.gpg/' | sort > listpaths
# (cd ~/.password-store/ ; find -type f ) | sort | diff -u - listpaths
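#
# Usage: kedpm2pass.py [exported-file ...]; reads the KedPM export from the
# named file(s), or from stdin when none is given (via fileinput).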
import re
import fileinput
import sys # for exit
import subprocess
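# insert(d) writes one parsed record into pass: d['Path'] names the entry,
# d['Password'] becomes the first line of the stored content, and any other
# fields are appended as "Key: value" lines.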
def insert(d):
path = d['Path']
del d['Path']
print "inserting " + path
content = d['Password'] + "\n"
del d['Password']
for k, v in d.iteritems():
content += "%s: %s\n" % (k, v)
del d
cmd = ["pass", "insert", "--force", "--multiline", path]
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate(content)
retcode = process.wait()
if retcode:
print 'command "%s" failed with exit code %d: %s' % (" ".join(cmd), retcode, stdout + stderr)
sys.exit(1)
d = None
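# Collect "Key: value" lines into the current record; each new "Path" line
# flushes the record gathered so far to pass via insert().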
for line in fileinput.input():
if line == "\n":
continue
match = re.match(r"(\w+): (.*)$", line)
if match:
if match.group(1) == 'Path':
if d is not None:
insert(d)
else:
d = {}
d[match.group(1)] = match.group(2)
#print "found field: %s => %s" % (match.group(1), match.group(2))
else:
print "warning: no match found on line: *%s*" % line
if d is not None:
insert(d)
| gpl-2.0 | 6,661,278,760,321,560,000 | 29.134615 | 106 | 0.577537 | false |
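For context, here is a minimal sketch of the flat `Key: value` record format the importer above consumes, and of what it does with one record. The path, password, and `Username` field are invented for illustration:

```python
import re

# Made-up KedPM-style export: one "Key: value" field per line, with each
# record introduced by its "Path" line (the same shape kedpm2pass.py parses).
sample = """\
Path: web/example.com
Password: hunter2
Username: alice
"""

record = {}
for line in sample.splitlines():
    match = re.match(r"(\w+): (.*)$", line)
    if match:
        record[match.group(1)] = match.group(2)

# kedpm2pass.py would pipe the following content to
# `pass insert --force --multiline web/example.com`:
path = record.pop('Path')
content = record.pop('Password') + "\n"
for k, v in record.items():
    content += "%s: %s\n" % (k, v)
print(path)     # web/example.com
print(content)  # hunter2, then "Username: alice"
```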
vyscond/cocos | cocos/batch.py | 3 | 4429 |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Batch
Batches
=======
Batches allow you to optimize the number of gl calls using pyglets batch
"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import pyglet
from pyglet.gl import *
from cocos.cocosnode import CocosNode
__all__ = ['BatchNode', 'BatchableNode']
def ensure_batcheable(node):
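# Recursively check that this node and its whole subtree can be batched.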
if not isinstance(node, BatchableNode):
raise Exception("Child nodes of a batch must be instances of BatchableNode")
for c in node.get_children():
ensure_batcheable(c)
class BatchNode(CocosNode):
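# Renders all of its (batchable) children through a single pyglet Batch,
# so the whole subtree is drawn with one batch.draw() call in visit().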
def __init__(self):
super(BatchNode, self).__init__()
self.batch = pyglet.graphics.Batch()
self.groups = {}
def add(self, child, z=0, name=None):
ensure_batcheable(child)
child.set_batch(self.batch, self.groups, z)
super(BatchNode, self).add(child, z, name)
def visit(self):
""" All children are placed in to self.batch, so nothing to visit """
if not self.visible:
return
glPushMatrix()
self.transform()
self.batch.draw()
glPopMatrix()
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchNode, self).remove(child)
def draw(self):
pass # All drawing done in visit!
class BatchableNode(CocosNode):
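# A node that can be placed under a BatchNode; set_batch() hands it the
# ancestor's batch and an OrderedGroup keyed by its z value.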
def add(self, child, z=0, name=None):
batchnode = self.get_ancestor(BatchNode)
if not batchnode:
# this node was added, but there's no batchnode in the
# hierarchy, so we proceed as normal
super(BatchableNode, self).add(child, z, name)
return
ensure_batcheable(child)
super(BatchableNode, self).add(child, z, name)
child.set_batch(self.batch, batchnode.groups, z)
def remove(self, child):
if isinstance(child, str):
child_node = self.get(child)
else:
child_node = child
child_node.set_batch(None)
super(BatchableNode, self).remove(child)
def set_batch(self, batch, groups=None, z=0):
self.batch = batch
if batch is None:
self.group = None
else:
group = groups.get(z)
if group is None:
group = pyglet.graphics.OrderedGroup(z)
groups[z] = group
self.group = group
for childZ, child in self.children:
child.set_batch(self.batch, groups, z + childZ)
| bsd-3-clause | 9,004,397,023,532,956,000 | 33.874016 | 82 | 0.641228 | false |
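For context, a short usage sketch of the module above: parenting sprites to a BatchNode so the whole subtree renders with a single batch.draw() call rather than one draw per node. The image name and positions are placeholders, and this assumes cocos.sprite.Sprite is batchable (in cocos2d it subclasses BatchableNode):

```python
from cocos.batch import BatchNode
from cocos.layer import Layer
from cocos.sprite import Sprite

class ManyBalls(Layer):
    def __init__(self):
        super(ManyBalls, self).__init__()
        batch = BatchNode()
        for i in range(100):
            ball = Sprite('ball.png')  # placeholder image name
            ball.position = 20 + i * 6, 240
            batch.add(ball, z=i)
        # The layer gets one child; visit() on the BatchNode issues a single
        # batch.draw() for all 100 sprites instead of 100 separate draws.
        self.add(batch)
```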
delhivery/django | tests/forms_tests/tests/test_forms.py | 46 | 148965 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
import json
import uuid
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import RegexValidator
from django.forms import (
BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput,
ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput,
MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select,
SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput,
TimeField, ValidationError, forms,
)
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html
from django.utils.safestring import SafeData, mark_safe
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
last_name = CharField()
birthday = DateField()
class FormsTestCase(SimpleTestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertHTMLEqual(p.errors.as_ul(), '')
self.assertEqual(p.errors.as_text(), '')
self.assertEqual(p.cleaned_data["first_name"], 'John')
self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'"
with six.assertRaisesRegex(self, KeyError, nonexistenterror):
p['nonexistentfield']
self.fail('Attempts to access non-existent fields should fail.')
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(form_output, [
['First name', 'John'],
['Last name', 'Lennon'],
['Birthday', '1940-10-9']
])
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {})
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
p = Person({'last_name': 'Lennon'})
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertFalse(p.is_valid())
self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']})
self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
self.assertEqual(p['first_name'].errors, ['This field is required.'])
self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
p = Person()
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in the Form,
# even if the Form's data didn't include a value for fields that are not
# required. In this example, the data dictionary doesn't include a value for the
# "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
# empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['nick_name'], '')
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {'first_name': 'John', 'last_name': 'Lennon'}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['birth_date'], None)
self.assertEqual(f.cleaned_data['first_name'], 'John')
self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form element.
# If it's a string that contains '%s', Django will use that as a format string
# into which the field's name will be inserted. It will also put a <label> around
# the human-readable labels for a field.
p = Person(auto_id='%s_id')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" />')
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# 'True' or 'true' should be rendered without a value attribute
f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
# A value of 'False' or 'false' should be rendered unchecked
f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False)
self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
# A value of '0' should be interpreted as a True value (#16820)
f = SignupForm({'email': '[email protected]', 'get_spam': '0'})
self.assertTrue(f.is_valid())
self.assertTrue(f.cleaned_data.get('get_spam'))
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
f = ContactForm(auto_id=False)
self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>')
self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# A subtlety: If one of the choices' value is the empty string and the form is
# unbound, then the <option> for the empty-string choice will get selected="selected".
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
# Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
# gets a distinct ID, formed by appending an underscore plus the button's
# zero-based index.
f = FrameworkForm(auto_id='id_%s')
self.assertHTMLEqual(str(f['language']), """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
# When RadioSelect is used with auto_id, and the whole form is printed using
# either as_table() or as_ul(), the label for the RadioSelect will point to the
# ID of the *first* radio button.
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
# Test iterating on individual radios in a template
t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}')
self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="myradio"><label for="id_language_0">
<input id="id_language_0" name="language" type="radio" value="P" /> Python</label></div>
<div class="myradio"><label for="id_language_1">
<input id="id_language_1" name="language" type="radio" value="J" /> Java</label></div>""")
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
self.assertHTMLEqual('\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />')
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
def test_form_with_disabled_fields(self):
class PersonForm(Form):
name = CharField()
birthday = DateField(disabled=True)
class PersonFormFieldInitial(Form):
name = CharField()
birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16))
# Disabled fields are generally not transmitted by user agents.
# The value from the form's initial data is used.
f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial({'name': 'John Doe'})
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
)
# Values provided in the form's data are ignored.
data = {'name': 'John Doe', 'birthday': '1984-11-10'}
f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial(data)
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
)
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
# SplitDateTimeField rendered as_hidden() is special too
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
self.assertTrue(f.is_valid())
self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
f = SongForm({'composers': ['J', 'P']}, auto_id=False)
self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
# Test iterating on individual checkboxes in a template
t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}')
self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="J" /> John Lennon</label></div>
<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="P" /> Paul McCartney</label></div>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id='%s_id')
self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id">
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict and
# MultiValueDict conveniently work with this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
# When using CheckboxSelectMultiple, the framework expects the submitted
# data to be a list and returns the cleaned data as a list.
f = SongForm({'name': 'Yesterday'}, auto_id=False)
self.assertEqual(f.errors['composers'], ['This field is required.'])
f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
self.assertEqual(f.cleaned_data['name'], 'Yesterday')
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
def clean_special_safe_name(self):
raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
f = EscapingForm({
'special_name': "Should escape < & > and <script>alert('xss')</script>",
'special_safe_name': "<i>Do not escape</i>"
}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape < & > and <script>alert('xss')</script>'</li></ul><input type="text" name="special_name" value="Should escape < & > and <script>alert('xss')</script>" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you want the
# validation message to be associated with a particular field, implement the
# clean_XXX() method on the Form, where XXX is the field name. As in
# Field.clean(), the clean_XXX() method should return the cleaned value. In the
# clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
# of all the data that has been cleaned *so far*, in field order, up to and
# including the current field (e.g., the field XXX if you're in clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data['password2']
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
# Another way of doing multiple-field validation is by implementing the
# Form's clean() method. Usually ValidationError raised by that method
# will not be associated with a particular field and will have a
# special-case association with the field named '__all__'. It's
# possible to associate the errors to particular field with the
# Form.add_error() method or by passing a dictionary that maps each
# field to one or more errors.
#
# Note that in Form.clean(), you have access to self.cleaned_data, a
# dictionary of all the fields/values that have *not* raised a
# ValidationError. Also note Form.clean() is required to return a
# dictionary of all clean data.
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
# Test raising a ValidationError as NON_FIELD_ERRORS.
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
# Test raising ValidationError that targets multiple fields.
errors = {}
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
errors['password1'] = 'Forbidden value.'
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE':
errors['password2'] = ['Forbidden value.']
if errors:
raise ValidationError(errors)
# Test Form.add_error()
if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2':
self.add_error(None, 'Non-field error 1.')
self.add_error('password1', 'Forbidden value 2.')
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2':
self.add_error('password2', 'Forbidden value 2.')
raise ValidationError('Non-field error 2.')
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
self.assertEqual(f.errors['username'], ['This field is required.'])
self.assertEqual(f.errors['password1'], ['This field is required.'])
self.assertEqual(f.errors['password2'], ['This field is required.'])
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data['username'], 'adrian')
self.assertEqual(f.cleaned_data['password1'], 'foo')
self.assertEqual(f.cleaned_data['password2'], 'foo')
f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE'}, auto_id=False)
self.assertEqual(f.errors['password1'], ['Forbidden value.'])
self.assertEqual(f.errors['password2'], ['Forbidden value.'])
f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2'}, auto_id=False)
self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.'])
self.assertEqual(f.errors['password1'], ['Forbidden value 2.'])
self.assertEqual(f.errors['password2'], ['Forbidden value 2.'])
with six.assertRaisesRegex(self, ValueError, "has no field named"):
f.add_error('missing_field', 'Some error.')
def test_update_error_dict(self):
class CodeForm(Form):
code = CharField(max_length=10)
def clean(self):
try:
raise ValidationError({'code': [ValidationError('Code error 1.')]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({'code': [ValidationError('Code error 2.')]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({'code': forms.ErrorList(['Code error 3.'])})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError('Non-field error 1.')
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError([ValidationError('Non-field error 2.')])
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
# Ensure that the newly added list of errors is an instance of ErrorList.
for field, error_list in self._errors.items():
if not isinstance(error_list, self.error_class):
self._errors[field] = self.error_class(error_list)
form = CodeForm({'code': 'hello'})
# Trigger validation.
self.assertFalse(form.is_valid())
# Check that update_error_dict didn't lose track of the ErrorDict type.
self.assertIsInstance(form._errors, forms.ErrorDict)
self.assertEqual(dict(form.errors), {
'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'],
NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'],
})
def test_has_error(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput, min_length=5)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
and self.cleaned_data['password1'] != self.cleaned_data['password2']):
raise ValidationError(
'Please make sure your passwords match.',
code='password_mismatch',
)
f = UserRegistration(data={})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'required'))
self.assertFalse(f.has_error('password1', 'anything'))
f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'})
self.assertTrue(f.has_error('password1'))
self.assertTrue(f.has_error('password1', 'min_length'))
self.assertFalse(f.has_error('password1', 'anything'))
self.assertFalse(f.has_error('password2'))
self.assertFalse(f.has_error('password2', 'anything'))
f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
self.assertFalse(f.has_error('password1'))
self.assertFalse(f.has_error('password1', 'required'))
self.assertTrue(f.has_error(NON_FIELD_ERRORS))
self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch'))
self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything'))
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
f = Person(names_required=True)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({'class': 'required'}, {'class': 'required'}))
f = Person(names_required=False)
self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
f = Person(name_max_length=20)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
f = Person(name_max_length=None)
self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
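# The same copy-on-construction behavior protects 'choices': each instance's
# ChoiceField carries its own copy of the choices list, so the += above
# extends only that one instance (a behavioral note on the #15127 fix).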
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])
def test_hidden_widget(self):
# HiddenInput widgets are displayed differently in the as_table(), as_ul()
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id='id_%s')
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
def test_explicit_field_order(self):
class TestFormParent(Form):
field1 = CharField()
field2 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field_order = ['field6', 'field5', 'field4', 'field2', 'field1']
class TestForm(TestFormParent):
field3 = CharField()
field_order = ['field2', 'field4', 'field3', 'field5', 'field6']
class TestFormRemove(TestForm):
field1 = None
class TestFormMissing(TestForm):
field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1']
field1 = None
class TestFormInit(TestFormParent):
field3 = CharField()
field_order = None
def __init__(self, **kwargs):
super(TestFormInit, self).__init__(**kwargs)
self.order_fields(field_order=TestForm.field_order)
p = TestFormParent()
self.assertEqual(list(p.fields.keys()), TestFormParent.field_order)
p = TestFormRemove()
self.assertEqual(list(p.fields.keys()), TestForm.field_order)
p = TestFormMissing()
self.assertEqual(list(p.fields.keys()), TestForm.field_order)
p = TestForm()
self.assertEqual(list(p.fields.keys()), TestFormMissing.field_order)
p = TestFormInit()
order = list(TestForm.field_order) + ['field1']
self.assertEqual(list(p.fields.keys()), order)
TestForm.field_order = ['unknown']
p = TestForm()
self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3'])
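# A minimal sketch of the ordering rule exercised above (assumed behavior,
# not the exact implementation of order_fields()): names in field_order are
# pulled to the front in the given order, unknown names are ignored, and any
# remaining fields keep their original relative order, e.g.:
#
#     ordered = OrderedDict()
#     for key in field_order:
#         if key in fields:
#             ordered[key] = fields.pop(key)
#     ordered.update(fields)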
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
# If you specify a custom "attrs" that includes the "maxlength" attribute,
# the Field's max_length attribute will override whatever "maxlength" you specify
# in "attrs".
class UserRegistration(Form):
username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
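# The field wins because CharField supplies its max_length through
# Field.widget_attrs(), which is applied to the widget after any 'attrs'
# passed to the widget constructor. A hedged sanity check (assumes the
# stock CharField behavior of stringifying max_length):
self.assertEqual(UserRegistration.base_fields['username'].widget.attrs['maxlength'], '10')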
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label='Your username')
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label='The first question')
q2 = CharField(label='What is your name?')
q3 = CharField(label='The answer to life is:')
q4 = CharField(label='Answer this question!')
q5 = CharField(label='The last question. Period.')
self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
# If a label is set to the empty string for a field, that field won't get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label='')
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
# If label is None, Django will auto-create the label from the field name. This
# is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(auto_id='id_%s')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify the
# punctuation symbol used at the end of a label. By default, the colon (:) is
# used, and is only appended to the label if the label doesn't already end with a
# punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
# be appended regardless of the last character of the label.
class FavoriteForm(Form):
color = CharField(label='Favorite color?')
animal = CharField(label='Favorite animal')
answer = CharField(label='Secret answer', label_suffix=' =')
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='?')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='')
self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
f = FavoriteForm(auto_id=False, label_suffix='\u2192')
self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n<li>Secret answer = <input type="text" name="answer" /></li>')
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# An 'initial' value is *not* used as a fallback if data is not provided. In this
# example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial='django')
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callable data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])
# We need to define functions that get called later.
def initial_django():
return 'django'
def initial_stephane():
return 'stephane'
def initial_options():
return ['f', 'b']
def initial_other_options():
return ['b', 'w']
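# Note that these callables are not invoked at definition time; the form
# calls them when it needs a value for rendering, and submitted data (even
# an empty submission) takes precedence over them, as the cases below show.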
# Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
p = UserRegistration({'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
# A callable 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the form raises a
# validation error rather than using the initial value for 'username'.
p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
self.assertEqual(p.errors['username'], ['This field is required.'])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
# then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# Test that a field raising ValidationError is always in changed_data
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
def test_boundfield_values(self):
# It's possible to get to the value which would be used for rendering
# the widget for a field by using the BoundField's value method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
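# Roughly, BoundField.value() returns the widget's data extracted from the
# submitted payload when the form is bound, and falls back to the field's
# (or form's) 'initial' when it is unbound -- a simplified description of
# the lookup, not the exact code path.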
def test_boundfield_initial_called_once(self):
"""
Multiple calls to BoundField().value() in an unbound form should return
the same result each time (#24391).
"""
class MyForm(Form):
name = CharField(max_length=10, initial=uuid.uuid4)
form = MyForm()
name = form['name']
self.assertEqual(name.value(), name.value())
# BoundField is also cached
self.assertIs(form['name'], name)
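# The stability comes from caching: the callable initial is evaluated once
# and reused, and form['name'] hands back the same cached BoundField on
# every access (a behavioral summary of the #24391 fix, not its diff).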
def test_boundfield_rendering(self):
"""
Python 2 issue: Test that rendering a BoundField with bytestring content
doesn't lose its safe string status (#22950).
"""
class CustomWidget(TextInput):
def render(self, name, value, attrs=None):
return format_html(str('<input{} />'), ' id=custom')
class SampleForm(Form):
name = CharField(widget=CustomWidget)
f = SampleForm(data={'name': 'bar'})
self.assertIsInstance(force_text(f['name']), SafeData)
def test_initial_datetime_values(self):
now = datetime.datetime.now()
# Nix microseconds (since they should be ignored). #22502
now_no_ms = now.replace(microsecond=0)
if now == now_no_ms:
now = now.replace(microsecond=1)
def delayed_now():
return now
def delayed_now_time():
return now.time()
class HiddenInputWithoutMicrosec(HiddenInput):
supports_microseconds = False
class TextInputWithoutMicrosec(TextInput):
supports_microseconds = False
class DateTimeForm(Form):
auto_timestamp = DateTimeField(initial=delayed_now)
auto_time_only = TimeField(initial=delayed_now_time)
supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput)
hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec)
ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec)
unbound = DateTimeForm()
self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms)
self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time())
self.assertEqual(unbound['supports_microseconds'].value(), now)
self.assertEqual(unbound['hi_default_microsec'].value(), now)
self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms)
self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms)
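# Summary of the behavior under test (#22502): widgets whose
# supports_microseconds is False receive the initial datetime/time value
# with microsecond=0, while widgets that keep the default of True
# (TextInput and HiddenInput here) receive the value untouched; the
# date/time-specific default widgets are the ones that opt out.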
def test_help_text(self):
# You can specify descriptive text for a field by using the 'help_text' argument.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""")
self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""")
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({'username': 'foo'}, auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
username = CharField(max_length=10, help_text='e.g., [email protected]')
password = CharField(widget=PasswordInput)
next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
m = Musician(auto_id=False)
self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
# Yes, you can subclass multiple forms. The fields are added in the order in
# which the parent classes are listed.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li>
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same HTML page,
# or multiple copies of the same form. We can accomplish this with form prefixes.
# Pass the keyword argument 'prefix' to the Form constructor to use this feature.
# This value will be prepended to each HTML form field name. One way to think
# about this is "namespaces for HTML forms". Notice that in the data argument,
# each field's key has the prefix, in this case 'person1', prepended to the
# actual field name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
'person1-first_name': '',
'person1-last_name': '',
'person1-birthday': ''
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
self.assertEqual(p['first_name'].errors, ['This field is required.'])
try:
p['person1-first_name'].errors
self.fail('Attempts to access non-existent fields should fail.')
except KeyError:
pass
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': '1940-10-9'
}
p = Person(data, prefix='person1')
self.assertEqual(p.errors['first_name'], ['This field is required.'])
self.assertEqual(p.errors['last_name'], ['This field is required.'])
self.assertEqual(p.errors['birthday'], ['This field is required.'])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
'person1-first_name': 'John',
'person1-last_name': 'Lennon',
'person1-birthday': '1940-10-9',
'person2-first_name': 'Jim',
'person2-last_name': 'Morrison',
'person2-birthday': '1943-12-8'
}
p1 = Person(data, prefix='person1')
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data['first_name'], 'John')
self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
p2 = Person(data, prefix='person2')
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field, according to
# self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def add_prefix(self, field_name):
return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
p = Person(prefix='foo')
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
data = {
'foo-prefix-first_name': 'John',
'foo-prefix-last_name': 'Lennon',
'foo-prefix-birthday': '1940-10-9'
}
p = Person(data, prefix='foo')
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
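# The prefixed name in the HTML comes from BoundField.html_name, which is
# just form.add_prefix(field_name), so overriding add_prefix() changes
# rendering and data lookup consistently. An illustrative check:
self.assertEqual(p['first_name'].html_name, 'foo-prefix-first_name')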
def test_class_prefix(self):
# Prefix can be also specified at the class level.
class Person(Form):
first_name = CharField()
prefix = 'foo'
p = Person()
self.assertEqual(p.prefix, 'foo')
p = Person(prefix='bar')
self.assertEqual(p.prefix, 'bar')
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different from its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
p = Person({'name': 'Joe'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
def test_forms_with_file_fields(self):
# FileFields are a special case because they take their data from the request.FILES,
# not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
self.assertTrue(f.is_valid())
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
def test_basic_processing_in_view(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
def my_function(method, post_data):
if method == 'POST':
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
return t.render(Context({'form': form}))
# Case 1: GET (an empty form, with no errors).
self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
# Case 2: POST with erroneous data (a redisplayed form, with errors).
self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
# Case 3: POST with valid data (the success message).
self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"))
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise ValidationError('Please make sure your passwords match.')
return self.cleaned_data
# You have full flexibility in displaying form fields in a template. Just pass a
# Form instance to the template, and use "dot" access to refer to individual
# fields. Note, however, that this flexibility comes with the responsibility of
# displaying all the errors, including any that might not be associated with a
# particular field.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
# Use form.[field].label to output a field's label. You can specify the label for
# a field by using the 'label' argument to a Field class. If you don't specify
# 'label', Django will use the field name with underscores converted to spaces,
# and the initial letter capitalized.
t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
# Use form.[field].label_tag to output a field's label with a <label> tag
# wrapped around it, but *only* if the given field has an "id" attribute.
# Recall from above that passing the "auto_id" argument to a Form gives each
# field an "id" attribute.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
# Use form.[field].help_text to output a field's help text. If the given field
# does not have help text, nothing will be output.
t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '')
# To display the errors that aren't associated with a particular field -- e.g.,
# the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
# template. If used on its own, it is displayed as a <ul> (or an empty string, if
# the list of errors is empty). You can also use it in {% if %} statements.
t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
# argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
# First let's show what happens if empty_permitted=False (the default):
data = {'artist': '', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
self.assertEqual(form.cleaned_data, {})
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {'artist': 'The Doors', 'song': ''}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['This field is required.']})
self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
# If a field is not given in the data then None is returned for its data. Let's
# make sure that when checking for empty_permitted, None is treated
# accordingly.
data = {'artist': None, 'song': ''}
form = SongForm(data, empty_permitted=True)
self.assertTrue(form.is_valid())
# However, we *really* need to be sure we are checking for None, as any data in
# initial that evaluates to False in a boolean context must still be treated literally.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {'amount': '0.0', 'qty': ''}
form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
self.assertTrue(form.is_valid())
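# Mechanically, full_clean() short-circuits for an empty-permitted form:
# when self.empty_permitted is set and has_changed() is False, validation
# is skipped entirely, which is why has_changed() must compare submitted
# data against 'initial' with a literal None check (a sketch of the
# control flow, not a quote of the source).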
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
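# show_hidden_initial exists so a form can detect changes server-side: the
# hidden 'initial-<name>' input carries the value the form was rendered
# with, and has_changed() compares the resubmitted value against it.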
def test_error_html_required_html_classes(self):
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = 'error'
p.required_css_class = 'required'
self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""")
self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""")
self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label class="required" for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""")
def test_label_has_required_css_class(self):
"""
#17922 - required_css_class is added to the label_tag() of required fields.
"""
class SomeForm(Form):
required_css_class = 'required'
field = CharField(max_length=10)
field2 = IntegerField(required=False)
f = SomeForm({'field': 'test'})
self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>')
self.assertHTMLEqual(f['field'].label_tag(attrs={'class': 'foo'}),
'<label for="id_field" class="foo required">Field:</label>')
self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>')
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
def test_multivalue_field_validation(self):
def bad_names(value):
if value == 'bad value':
raise ValidationError('bad value not allowed')
class NameField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (CharField(label='First name', max_length=10),
CharField(label='Last name', max_length=10))
super(NameField, self).__init__(fields=fields, *args, **kwargs)
def compress(self, data_list):
return ' '.join(data_list)
class NameForm(Form):
name = NameField(validators=[bad_names])
form = NameForm(data={'name': ['bad', 'value']})
form.full_clean()
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['bad value not allowed']})
form = NameForm(data={'name': ['should be overly', 'long for the field names']})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
'Ensure this value has at most 10 characters (it has 24).']})
form = NameForm(data={'name': ['fname', 'lname']})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'name': 'fname lname'})
def test_multivalue_deep_copy(self):
"""
#19298 -- MultiValueField needs to override the default as it needs
to deep-copy subfields:
"""
class ChoicesField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (ChoiceField(label='Rank',
choices=((1, 1), (2, 2))),
CharField(label='Name', max_length=10))
super(ChoicesField, self).__init__(fields=fields, *args, **kwargs)
field = ChoicesField()
field2 = copy.deepcopy(field)
self.assertIsInstance(field2, ChoicesField)
self.assertIsNot(field2.fields, field.fields)
self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)
def test_multivalue_initial_data(self):
"""
#23674 -- invalid initial data should not break form.changed_data()
"""
class DateAgeField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (DateField(label="Date"), IntegerField(label="Age"))
super(DateAgeField, self).__init__(fields=fields, *args, **kwargs)
class DateAgeForm(Form):
date_age = DateAgeField()
data = {"date_age": ["1998-12-06", 16]}
form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
self.assertTrue(form.has_changed())
def test_multivalue_optional_subfields(self):
class PhoneField(MultiValueField):
def __init__(self, *args, **kwargs):
fields = (
CharField(label='Country Code', validators=[
RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]),
CharField(label='Phone Number'),
CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}),
CharField(label='Label', required=False, help_text='E.g. home, work.'),
)
super(PhoneField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
return '%s.%s ext. %s (label: %s)' % tuple(data_list)
return None
# An empty value for any field will raise a `required` error on a
# required `MultiValueField`.
f = PhoneField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61'])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123'])
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# Empty values for fields will NOT raise a `required` error on an
# optional `MultiValueField`
f = PhoneField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
self.assertEqual('+61. ext. (label: )', f.clean(['+61']))
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For a required `MultiValueField` with `require_all_fields=False`, a
# `required` error will only be raised if all fields are empty. Fields
# can individually be required or optional. An empty value for any
# required field will raise an `incomplete` error.
f = PhoneField(require_all_fields=False)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
# For an optional `MultiValueField` with `require_all_fields=False`, we
# don't get any `required` error but we still get `incomplete` errors.
f = PhoneField(required=False, require_all_fields=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
six.assertRaisesRegex(self, ValidationError,
"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
self.assertRaisesMessage(ValidationError,
"'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
def test_custom_empty_values(self):
"""
Test that form fields can customize what is considered as an empty value
for themselves (#19997).
"""
class CustomJSONField(CharField):
empty_values = [None, '']
def to_python(self, value):
# Fake json.loads
if value == '{}':
return {}
return super(CustomJSONField, self).to_python(value)
class JSONForm(forms.Form):
json = CustomJSONField()
form = JSONForm(data={'json': '{}'})
form.full_clean()
self.assertEqual(form.cleaned_data, {'json': {}})
def test_boundfield_label_tag(self):
class SomeForm(Form):
field = CharField()
boundfield = SomeForm()['field']
testcases = [ # (args, kwargs, expected)
# without anything: just print the <label>
((), {}, '<label for="id_field">Field:</label>'),
# passing just one argument: overrides the field's label
(('custom',), {}, '<label for="id_field">custom:</label>'),
# the overridden label is escaped
(('custom&',), {}, '<label for="id_field">custom&:</label>'),
((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'),
# Passing attrs to add extra attributes on the <label>
((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>')
]
for args, kwargs, expected in testcases:
self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
"""
If a widget has no id, label_tag just returns the text with no
surrounding <label>.
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(auto_id='')['field']
self.assertHTMLEqual(boundfield.label_tag(), 'Field:')
self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&:')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
class CustomIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return 'custom_' + id
class EmptyIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return None
class SomeForm(Form):
custom = CharField(widget=CustomIdForLabelTextInput)
empty = CharField(widget=EmptyIdForLabelTextInput)
form = SomeForm()
self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>')
self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>')
def test_boundfield_empty_label(self):
class SomeForm(Form):
field = CharField(label='')
boundfield = SomeForm()['field']
self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
def test_boundfield_id_for_label(self):
class SomeForm(Form):
field = CharField(label='')
self.assertEqual(SomeForm()['field'].id_for_label, 'id_field')
def test_boundfield_id_for_label_override_by_attrs(self):
"""
If an id is provided in `Widget.attrs`, it overrides the generated ID,
unless it is `None`.
"""
class SomeForm(Form):
field = CharField(widget=forms.TextInput(attrs={'id': 'myCustomID'}))
field_none = CharField(widget=forms.TextInput(attrs={'id': None}))
form = SomeForm()
self.assertEqual(form['field'].id_for_label, 'myCustomID')
self.assertEqual(form['field_none'].id_for_label, 'id_field_none')
def test_label_tag_override(self):
"""
BoundField label_suffix (if provided) overrides Form label_suffix
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(label_suffix='!')['field']
self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>')
def test_field_name(self):
"""#5749 - `field_name` may be used as a key in _html_output()."""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p id="p_%(field_name)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_field_without_css_classes(self):
"""
`css_classes` may be used as a key in _html_output() (empty classes).
"""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class=""></p>')
def test_field_with_css_class(self):
"""
`css_classes` may be used as a key in _html_output() (class comes
from required_css_class in this case).
"""
class SomeForm(Form):
some_field = CharField()
required_css_class = 'foo'
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>')
def test_field_name_with_hidden_input(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
error_row='%s',
row_ender='</p>',
help_text_html=' %s',
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" /> custom'
'<input id="id_hidden1" name="hidden1" type="hidden" />'
'<input id="id_hidden2" name="hidden2" type="hidden" /></p>'
)
def test_field_name_with_hidden_input_and_non_matching_row_ender(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row ended with the specific row ender.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
error_row='%s',
row_ender='<hr/><hr/>',
help_text_html=' %s',
errors_on_separate_row=True
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" /> custom</p>\n'
'<input id="id_hidden1" name="hidden1" type="hidden" />'
'<input id="id_hidden2" name="hidden2" type="hidden" /><hr/><hr/>'
)
def test_error_dict(self):
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2})
form = MyForm({})
self.assertEqual(form.is_valid(), False)
errors = form.errors.as_text()
control = [
'* foo\n * This field is required.',
'* bar\n * This field is required.',
'* __all__\n * Non-field error.',
]
for error in control:
self.assertIn(error, errors)
errors = form.errors.as_ul()
control = [
'<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>',
]
for error in control:
self.assertInHTML(error, errors)
errors = json.loads(form.errors.as_json())
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': 'Non-field error.'}]
}
self.assertEqual(errors, control)
def test_error_dict_as_json_escape_html(self):
"""#21962 - adding html escape flag to ErrorDict"""
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError('<p>Non-field error.</p>',
code='secret',
params={'a': 1, 'b': 2})
control = {
'foo': [{'code': 'required', 'message': 'This field is required.'}],
'bar': [{'code': 'required', 'message': 'This field is required.'}],
'__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}]
}
form = MyForm({})
self.assertFalse(form.is_valid())
errors = json.loads(form.errors.as_json())
self.assertEqual(errors, control)
errors = json.loads(form.errors.as_json(escape_html=True))
control['__all__'][0]['message'] = '<p>Non-field error.</p>'
self.assertEqual(errors, control)
def test_error_list(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertIsInstance(e, list)
self.assertIn('Foo', e)
self.assertIn('Foo', forms.ValidationError(e))
self.assertEqual(
e.as_text(),
'* Foo\n* Foobar'
)
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
self.assertEqual(
json.loads(e.as_json()),
[{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}]
)
def test_error_list_class_not_specified(self):
e = ErrorList()
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_class_has_one_class_specified(self):
e = ErrorList(error_class='foobar-error-class')
e.append('Foo')
e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_with_hidden_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField(widget=HiddenInput)
p = Person({'first_name': 'John'})
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></li>"""
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></td></tr>"""
)
def test_error_list_with_non_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField()
def clean(self):
raise ValidationError('Generic validation error')
p = Person({'first_name': 'John', 'last_name': 'Lennon'})
self.assertHTMLEqual(
str(p.non_field_errors()),
'<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield"><li>Generic validation error</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></li>
<li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>"""
)
self.assertHTMLEqual(
p.non_field_errors().as_text(),
'* Generic validation error'
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></p>
<p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>"""
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>"""
)
def test_errorlist_override(self):
@python_2_unicode_compatible
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return '<div class="errorlist">%s</div>' % ''.join(
'<div class="error">%s</div>' % force_text(e) for e in self)
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = dict(email='invalid')
f = CommentForm(data, auto_id=False, error_class=DivErrorList)
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_baseform_repr(self):
"""
BaseForm.__repr__() should contain some basic information about the
form.
"""
p = Person()
self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>")
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>")
p.is_valid()
self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>")
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
p.is_valid()
self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>")
def test_baseform_repr_dont_trigger_validation(self):
"""
BaseForm.__repr__() shouldn't trigger the form validation.
"""
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
repr(p)
self.assertRaises(AttributeError, lambda: p.cleaned_data)
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'})
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data['username'] = data['username'].lower()
return data
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
self.cleaned_data['username'] = self.cleaned_data['username'].lower()
# don't return anything
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
# Return a different dict. We have not changed self.cleaned_data.
return {
'username': data['username'].lower(),
'password': 'this_is_not_a_secret',
}
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_html_safe(self):
class SimpleForm(Form):
username = CharField()
form = SimpleForm()
self.assertTrue(hasattr(SimpleForm, '__html__'))
self.assertEqual(force_text(form), form.__html__())
self.assertTrue(hasattr(form['username'], '__html__'))
self.assertEqual(force_text(form['username']), form['username'].__html__())
| bsd-3-clause | 7,104,298,423,227,530,000 | 53.833886 | 519 | 0.609213 | false |
gangadharkadam/sher | erpnext/manufacturing/doctype/bom/test_bom.py | 38 | 1471 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('Bom')
class TestBOM(unittest.TestCase):
def test_get_items(self):
from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
items_dict = get_bom_items_as_dict(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=0)
self.assertTrue(test_records[2]["bom_materials"][0]["item_code"] in items_dict)
self.assertTrue(test_records[2]["bom_materials"][1]["item_code"] in items_dict)
self.assertEquals(len(items_dict.values()), 2)
def test_get_items_exploded(self):
from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
items_dict = get_bom_items_as_dict(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=1)
self.assertTrue(test_records[2]["bom_materials"][0]["item_code"] in items_dict)
self.assertFalse(test_records[2]["bom_materials"][1]["item_code"] in items_dict)
self.assertTrue(test_records[0]["bom_materials"][0]["item_code"] in items_dict)
self.assertTrue(test_records[0]["bom_materials"][1]["item_code"] in items_dict)
self.assertEquals(len(items_dict.values()), 3)
def test_get_items_list(self):
from erpnext.manufacturing.doctype.bom.bom import get_bom_items
self.assertEquals(len(get_bom_items(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=1)), 3)
| agpl-3.0 | -1,929,751,487,570,526,000 | 48.033333 | 98 | 0.732835 | false |
OpenTTD-Ladder/ladder-web | ladder/design/management/commands/compile_css.py | 1 | 1028 | import os
from ..base import CompilerCommand, CSS_PROPERTY, CSS_STATIC_DIR
class Command(CompilerCommand):
static_dir = CSS_STATIC_DIR
module_property = CSS_PROPERTY
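    # Behaviour sketch (inferred from the methods below, not documented
    # upstream): each queued stylesheet source (presumably .less) is checked
    # and then compiled with the external `recess` CLI into sibling .css and
    # .min.css files.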
def queue_file(self, fname, module):
return self.test_file_age(fname, ''.join([os.path.splitext(fname)[0], '.css']))
def test_file(self, name, item):
stdout, _, _ = self.get_output("recess", item)
failed = " error" in stdout or "Error" in stdout
if failed:
print ""
print stdout
return not failed
def compile_file(self, name, item):
parts = os.path.splitext(item)
css_file = ''.join([parts[0], '.css'])
min_css = ''.join([parts[0], '.min.css'])
css_out, _, _ = self.get_output("recess", "--compile", item)
min_out, _, _ = self.get_output("recess", "--compress", item)
with open(css_file, 'w') as fh:
fh.write(css_out)
with open(min_css, 'w') as fh:
fh.write(min_out)
return True | gpl-2.0 | 8,837,112,577,709,586,000 | 31.15625 | 87 | 0.561284 | false |
google/material-design-icons | update/venv/lib/python3.9/site-packages/setuptools/command/install_lib.py | 5 | 5023 | import os
import sys
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def initialize_options(self):
orig.install_lib.initialize_options(self)
self.multiarch = None
self.install_layout = None
def finalize_options(self):
orig.install_lib.finalize_options(self)
        self.set_undefined_options('install', ('install_layout', 'install_layout'))
if self.install_layout == 'deb' and sys.version_info[:2] >= (3, 3):
import sysconfig
self.multiarch = sysconfig.get_config_var('MULTIARCH')
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
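        For example (illustrative values), pkg 'foo.bar' with exclusion_path
        '__init__.py' yields '<install_dir>/foo/bar/__init__.py'.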
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(sys, 'implementation'):
return
base = os.path.join('__pycache__', '__init__.' + sys.implementation.cache_tag)
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
import distutils.dir_util
distutils.dir_util._multiarch = self.multiarch
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
if self.multiarch:
import sysconfig
            ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix.endswith(self.multiarch + ext_suffix[-3:]):
new_suffix = None
else:
new_suffix = "%s-%s%s" % (ext_suffix[:-3], self.multiarch, ext_suffix[-3:])
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
if self.multiarch and new_suffix and dst.endswith(ext_suffix) and not dst.endswith(new_suffix):
dst = dst.replace(ext_suffix, new_suffix)
log.info("renaming extension to %s", os.path.basename(dst))
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| apache-2.0 | 6,992,083,474,491,409,000 | 33.170068 | 107 | 0.589289 | false |
stwunsch/gnuradio | gr-audio/examples/python/audio_fft.py | 68 | 4596 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, audio
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, waterfallsink2, scopesink2, form, slider
from optparse import OptionParser
import wx
import sys
class app_top_block(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
self.frame = frame
self.panel = panel
parser = OptionParser(option_class=eng_option)
parser.add_option("-W", "--waterfall", action="store_true", default=False,
help="Enable waterfall display")
parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
help="Enable oscilloscope display")
parser.add_option("-I", "--audio-input", type="string", default="",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args()
sample_rate = int(options.sample_rate)
if len(args) != 0:
parser.print_help()
sys.exit(1)
self.show_debug_info = True
# build the graph
if options.waterfall:
self.scope = \
waterfallsink2.waterfall_sink_f (panel, fft_size=1024, sample_rate=sample_rate)
elif options.oscilloscope:
self.scope = scopesink2.scope_sink_f(panel, sample_rate=sample_rate)
else:
self.scope = fftsink2.fft_sink_f (panel, fft_size=1024, sample_rate=sample_rate, fft_rate=30,
ref_scale=1.0, ref_level=0, y_divs=12)
self.src = audio.source (sample_rate, options.audio_input)
self.connect(self.src, self.scope)
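        # Resulting flowgraph (sketch): audio.source(sample_rate) --> one of
        # fft_sink_f / waterfall_sink_f / scope_sink_f, selected by the
        # command-line options above.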
self._build_gui(vbox)
# set initial values
def _set_status_msg(self, msg):
self.frame.GetStatusBar().SetStatusText(msg, 0)
def _build_gui(self, vbox):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
vbox.Add(self.scope.win, 10, wx.EXPAND)
#self._build_subpanel(vbox)
def _build_subpanel(self, vbox_arg):
# build a secondary information panel (sometimes hidden)
# FIXME figure out how to have this be a subpanel that is always
# created, but has its visibility controlled by foo.Show(True/False)
def _form_set_decim(kv):
return self.set_decim(kv['decim'])
if not(self.show_debug_info):
return
panel = self.panel
vbox = vbox_arg
myform = self.myform
#panel = wx.Panel(self.panel, -1)
#vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
myform['decim'] = form.int_field(
parent=panel, sizer=hbox, label="Decim",
callback=myform.check_input_and_call(_form_set_decim, self._set_status_msg))
hbox.Add((5,0), 1)
myform['fs@usb'] = form.static_float_field(
parent=panel, sizer=hbox, label="Fs@USB")
hbox.Add((5,0), 1)
myform['dbname'] = form.static_text_field(
parent=panel, sizer=hbox)
hbox.Add((5,0), 1)
myform['baseband'] = form.static_float_field(
parent=panel, sizer=hbox, label="Analog BB")
hbox.Add((5,0), 1)
myform['ddc'] = form.static_float_field(
parent=panel, sizer=hbox, label="DDC")
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
def main ():
app = stdgui2.stdapp(app_top_block, "Audio FFT", nstatus=1)
app.MainLoop()
if __name__ == '__main__':
main ()
| gpl-3.0 | -2,957,092,252,860,519,000 | 32.304348 | 105 | 0.618799 | false |
FEniCS/dolfin | site-packages/dolfin_utils/documentation/extractdocumentation.py | 1 | 7632 | # -*- coding: utf-8 -*-
"""Script to extract documentation from docstrings in *.h files in the DOLFIN
source tree."""
# Copyright (C) 2010 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Kristian B. Oelgaard, 2011.
# Modified by Marie E. Rognes, 2011.
# Modified by Anders E. Johansen, 2011.
#
# First added: 2010-08-26
# Last changed: 2011-07-10
import os, io
def extract_documentation(dolfin_dir, header, module):
"Extract documentation for given header in given module"
# print "Extracting documentation for %s..." % header
# List of classes with documentation
classnames = []
documentation = []
# Class name and parent class name
classname = None
parents = None
# Comment and signature
comment = None
signature = None
# Indentation of signatures
indent = 0
# Iterate over each line
f = io.open(os.path.join(dolfin_dir, "dolfin", module, header), encoding="utf-8")
for line in f:
# Check for comment
if "///" in line:
# We may have either "///" and "/// "
if "/// " in line:
c = line.split("/// ")[1].rstrip()
else:
c = line.split("///")[1].rstrip()
# Found start of new comment
if comment is None:
comment = c
# Continuing comment on next line
else:
comment += "\n" + c
# Check for class
# If anything goes wrong, remove the last '<' after template.
elif " class " in line and not ";" in line and not "//" in line and not "template<" in line:
# Get class name and parent
classname = line.split(" class ")[1].split(":")[0].strip()
if "public" in line:
# Strip of each parent of additional commas, blanks
# and newlines
parents = [p.strip(", \n") for p in line.split("public")[1:]]
# Remove virtual modifier
parents = [p.replace("virtual ", "") for p in parents]
# Store documentation
# TODO: KBO: we don't check if a given classname is in the dolfin
            # namespace.
classnames.append(classname)
documentation.append((classname, parents, comment, []))
classname = None
parents = None
comment = None
# If we encounter a '//' commented line we reset comment and proceed
# This means that '///' and '//' should not be mixed when documenting
# functions.
elif line.lstrip()[0:2] == "//" and not line.lstrip()[0:3] == "///":
comment = None
continue
# Check for function signature
elif comment is not None:
s = line.strip()
# Found start of new signature
if signature is None:
signature = s
#indent = (len(s.split("(")[0]) + 1)*" "
# Continuing signature on next line
else:
#signature += "\n" + indent + s
signature += " " + s
# Signature ends when we find ";" or "{"
if ";" in s or "{" in s:
# Strip out last part
signature = signature.split(";")[0]
signature = signature.split("{")[0]
signature = signature.strip()
# Remove stuff Sphinx can't handle
signature = signature.replace("virtual ", "")
signature = signature.replace("inline ", "")
# Remove ": stuff" for constructors
new_s = []
for l in signature.split("::"):
if not ":" in l:
new_s.append(l)
else:
new_s.append(l.split(":")[0])
break
signature = "::".join(new_s).strip()
# Remove template stuff (not handled correctly by Sphinx)
# e.g., 'template <typename Kernel> CGAL::Bbox_3 bbox()' in mesh/Point.h
if "template" in signature:
signature = ">".join(signature.split(">")[1:]).lstrip()
# Only handle functions or enums, i.e. signatures that contain
# '(' (or ')'). This is to avoid picking up data members.
# NOTE, KBO: Should we also skip private functions?
if not "(" in signature and not "enum" in signature:
# Reset comment and signature
comment = None
signature = None
continue
# Skip destructors (not handled by Sphinx)
destructor = "~" in signature
# Get function name
#function = signature.split("(")[0].split(" ")[-1]
# Store documentation
if len(documentation) > 0 and not destructor:
documentation[-1][-1].append((signature, comment))
elif not destructor:
documentation = [(None, None, None, [(signature, comment)])]
# Reset comment and signature
comment = None
signature = None
# Close file
f.close()
# Sort documentation alphabetically within each class
# for (classname, parent, comment, function_documentation) in documentation:
# function_documentation.sort()
return documentation, classnames
def extract_doc_representation(dolfin_dir):
# Extract modules from dolfin.h
modules = []
f = open(os.path.join(dolfin_dir, "dolfin", "dolfin.h"))
for line in f:
if line.startswith("#include <dolfin/"):
module = line.split("/")[1]
modules += [module]
f.close()
# Iterate over modules
documentation = {}
classnames = []
for module in modules:
# if not module == "la":
# continue
# Extract header files from dolfin_foo.h
f = open(os.path.join(dolfin_dir, "dolfin", module, "dolfin_%s.h" % module))
documentation[module] = []
for line in f:
# Generate documentation for header file
if line.startswith("#include <dolfin/"):
header = line.split("/")[2].split(">")[0]
# Skip version.h (generated from version.h.in via CMake)
if header == "version.h":
continue
# if not header == "GenericTensor.h":
# continue
doc, cls = extract_documentation(dolfin_dir, header, module)
documentation[module].append((header, doc))
classnames += cls
return documentation, classnames
if __name__ == "__main__":
docs, classes = extract_doc_representation()
# for c in classes:
# print c
# for key, doc in docs.items():
# for header, cont in doc:
# print cont
| lgpl-3.0 | -1,260,720,273,082,587,600 | 33.378378 | 100 | 0.539308 | false |
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/tests/unittests/test_handlers.py | 1 | 18087 | from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from waldur_core.core.models import StateMixin
from waldur_core.cost_tracking import models as cost_tracking_models
from waldur_core.structure import models as structure_models
from waldur_core.structure.tests import factories as structure_factories
from waldur_openstack.openstack.tests import factories as openstack_factories
from .. import factories
from ... import models, apps, PriceItemTypes
class BaseServicePropertyTest(TestCase):
def setUp(self):
self.tenant = openstack_factories.TenantFactory()
self.service_settings = structure_models.ServiceSettings.objects.get(
scope=self.tenant,
type=apps.OpenStackTenantConfig.service_name)
class SecurityGroupHandlerTest(BaseServicePropertyTest):
def setUp(self):
super(SecurityGroupHandlerTest, self).setUp()
def test_security_group_create(self):
openstack_security_group = openstack_factories.SecurityGroupFactory(
tenant=self.tenant,
state=StateMixin.States.CREATING
)
openstack_security_rule = openstack_factories.SecurityGroupRuleFactory(security_group=openstack_security_group)
self.assertEqual(models.SecurityGroup.objects.count(), 0)
openstack_security_group.set_ok()
openstack_security_group.save()
self.assertEqual(models.SecurityGroup.objects.count(), 1)
self.assertTrue(models.SecurityGroup.objects.filter(
settings=self.service_settings,
backend_id=openstack_security_group.backend_id
).exists())
security_group_property = models.SecurityGroup.objects.get(settings=self.service_settings,
backend_id=openstack_security_group.backend_id)
self.assertTrue(security_group_property.rules.filter(backend_id=openstack_security_rule.backend_id).exists())
def test_security_group_update(self):
openstack_security_group = openstack_factories.SecurityGroupFactory(
tenant=self.tenant,
name='New name',
description='New description',
state=StateMixin.States.UPDATING
)
security_group = factories.SecurityGroupFactory(
settings=self.service_settings,
backend_id=openstack_security_group.backend_id
)
openstack_security_group.set_ok()
openstack_security_group.save()
security_group.refresh_from_db()
self.assertIn(openstack_security_group.name, security_group.name)
self.assertIn(openstack_security_group.description, security_group.description)
def test_security_group_rules_are_updated_when_one_more_rule_is_added(self):
openstack_security_group = openstack_factories.SecurityGroupFactory(
tenant=self.tenant,
state=StateMixin.States.UPDATING
)
openstack_factories.SecurityGroupRuleFactory(security_group=openstack_security_group)
security_group = factories.SecurityGroupFactory(
settings=self.service_settings,
backend_id=openstack_security_group.backend_id
)
openstack_security_group.set_ok()
openstack_security_group.save()
self.assertEqual(security_group.rules.count(), 1, 'Security group rule has not been added')
self.assertEqual(security_group.rules.first().protocol, openstack_security_group.rules.first().protocol)
self.assertEqual(security_group.rules.first().from_port, openstack_security_group.rules.first().from_port)
self.assertEqual(security_group.rules.first().to_port, openstack_security_group.rules.first().to_port)
def test_security_group_is_deleted_when_openstack_security_group_is_deleted(self):
openstack_security_group = openstack_factories.SecurityGroupFactory(tenant=self.tenant)
factories.SecurityGroupFactory(settings=self.service_settings, backend_id=openstack_security_group.backend_id)
openstack_security_group.delete()
self.assertEqual(models.SecurityGroup.objects.count(), 0)
def test_if_security_group_already_exists_duplicate_is_not_created(self):
"""
Consider the following case: there are two objects:
security group as a property and security group as a resource.
Property has been created by pull_security_groups method.
When resource switches state, property should be created too via signal handler.
But as security group already exists as a property it should not be created twice,
because otherwise it violates uniqueness constraint.
"""
security_group = factories.SecurityGroupFactory(
settings=self.service_settings,
backend_id='backend_id',
)
openstack_security_group = openstack_factories.SecurityGroupFactory(
tenant=self.tenant,
state=StateMixin.States.CREATING,
backend_id=security_group.backend_id,
)
openstack_security_group.set_ok()
openstack_security_group.save()
self.assertEqual(models.SecurityGroup.objects.count(), 1)
class FloatingIPHandlerTest(BaseServicePropertyTest):
def setUp(self):
super(FloatingIPHandlerTest, self).setUp()
def test_floating_ip_create(self):
openstack_floating_ip = openstack_factories.FloatingIPFactory(
tenant=self.tenant,
state=StateMixin.States.CREATING
)
self.assertEqual(models.FloatingIP.objects.count(), 0)
openstack_floating_ip.set_ok()
openstack_floating_ip.save()
self.assertEqual(models.FloatingIP.objects.count(), 1)
def test_floating_ip_is_not_created_if_it_already_exists(self):
factories.FloatingIPFactory(
settings=self.service_settings,
backend_id='VALID_BACKEND_ID'
)
openstack_floating_ip = openstack_factories.FloatingIPFactory(
tenant=self.tenant,
state=StateMixin.States.CREATING,
backend_id='VALID_BACKEND_ID',
)
self.assertEqual(models.FloatingIP.objects.count(), 1)
openstack_floating_ip.set_ok()
openstack_floating_ip.save()
self.assertEqual(models.FloatingIP.objects.count(), 1)
def test_floating_ip_update(self):
openstack_floating_ip = openstack_factories.FloatingIPFactory(
tenant=self.tenant,
name='New name',
state=StateMixin.States.UPDATING
)
floating_ip = factories.FloatingIPFactory(
settings=self.service_settings,
backend_id=openstack_floating_ip.backend_id,
)
openstack_floating_ip.set_ok()
openstack_floating_ip.save()
floating_ip.refresh_from_db()
self.assertEqual(openstack_floating_ip.name, floating_ip.name)
self.assertEqual(openstack_floating_ip.address, floating_ip.address)
self.assertEqual(openstack_floating_ip.runtime_state, floating_ip.runtime_state)
self.assertEqual(openstack_floating_ip.backend_network_id, floating_ip.backend_network_id)
def test_floating_ip_delete(self):
openstack_floating_ip = openstack_factories.FloatingIPFactory(tenant=self.tenant)
factories.FloatingIPFactory(settings=self.service_settings, backend_id=openstack_floating_ip.backend_id)
openstack_floating_ip.delete()
self.assertEqual(models.FloatingIP.objects.count(), 0)
class TenantChangeCredentialsTest(TestCase):
def test_service_settings_password_and_username_are_updated_when_tenant_user_password_changes(self):
tenant = openstack_factories.TenantFactory()
service_settings = structure_models.ServiceSettings.objects.first()
service_settings.scope = tenant
service_settings.password = tenant.user_password
service_settings.save()
new_password = 'new_password'
new_username = 'new_username'
tenant.user_password = new_password
tenant.user_username = new_username
tenant.save()
service_settings.refresh_from_db()
self.assertEqual(service_settings.password, new_password)
self.assertEqual(service_settings.username, new_username)
class NetworkHandlerTest(BaseServicePropertyTest):
def setUp(self):
super(NetworkHandlerTest, self).setUp()
def test_network_create(self):
openstack_network = openstack_factories.NetworkFactory(
tenant=self.tenant, state=StateMixin.States.CREATING)
self.assertEqual(models.Network.objects.count(), 0)
openstack_network.set_ok()
openstack_network.save()
self.assertTrue(models.Network.objects.filter(backend_id=openstack_network.backend_id).exists())
def test_network_update(self):
openstack_network = openstack_factories.NetworkFactory(
tenant=self.tenant,
name='New network name',
state=StateMixin.States.UPDATING
)
network = factories.NetworkFactory(
settings=self.service_settings,
backend_id=openstack_network.backend_id,
)
openstack_network.set_ok()
openstack_network.save()
network.refresh_from_db()
self.assertEqual(openstack_network.name, network.name)
self.assertEqual(openstack_network.is_external, network.is_external)
self.assertEqual(openstack_network.type, network.type)
self.assertEqual(openstack_network.segmentation_id, network.segmentation_id)
self.assertEqual(openstack_network.backend_id, network.backend_id)
def test_network_delete(self):
openstack_network = openstack_factories.NetworkFactory(tenant=self.tenant)
factories.NetworkFactory(settings=self.service_settings, backend_id=openstack_network.backend_id)
openstack_network.delete()
self.assertEqual(models.Network.objects.count(), 0)
class SubNetHandlerTest(BaseServicePropertyTest):
def setUp(self):
super(SubNetHandlerTest, self).setUp()
self.openstack_network = openstack_factories.NetworkFactory(tenant=self.tenant)
self.network = factories.NetworkFactory(
settings=self.service_settings,
backend_id=self.openstack_network.backend_id
)
def test_subnet_create(self):
openstack_subnet = openstack_factories.SubNetFactory(
network=self.openstack_network,
state=StateMixin.States.CREATING
)
self.assertEqual(models.SubNet.objects.count(), 0)
openstack_subnet.set_ok()
openstack_subnet.save()
self.assertTrue(models.SubNet.objects.filter(backend_id=openstack_subnet.backend_id).exists())
def test_subnet_update(self):
openstack_subnet = openstack_factories.SubNetFactory(
network=self.openstack_network,
name='New subnet name',
state=StateMixin.States.UPDATING
)
subnet = factories.SubNetFactory(
network=self.network,
settings=self.service_settings,
backend_id=openstack_subnet.backend_id,
)
openstack_subnet.set_ok()
openstack_subnet.save()
subnet.refresh_from_db()
self.assertEqual(openstack_subnet.name, subnet.name)
self.assertEqual(openstack_subnet.cidr, subnet.cidr)
self.assertEqual(openstack_subnet.gateway_ip, subnet.gateway_ip)
self.assertEqual(openstack_subnet.allocation_pools, subnet.allocation_pools)
self.assertEqual(openstack_subnet.ip_version, subnet.ip_version)
self.assertEqual(openstack_subnet.enable_dhcp, subnet.enable_dhcp)
self.assertEqual(openstack_subnet.dns_nameservers, subnet.dns_nameservers)
def test_subnet_delete(self):
openstack_subnet = openstack_factories.SubNetFactory(network__tenant=self.tenant)
factories.SubNetFactory(settings=self.service_settings, backend_id=openstack_subnet.backend_id)
openstack_subnet.delete()
self.assertEqual(models.SubNet.objects.count(), 0)
class ServiceSettingsCertificationHandlerTest(TestCase):
    def test_openstack_tenant_service_certifications_are_updated_when_tenant_settings_certifications_are_added(self):
tenant = openstack_factories.TenantFactory()
tenant_service1 = factories.OpenStackTenantServiceFactory(settings__scope=tenant)
tenant_service2 = factories.OpenStackTenantServiceFactory(settings__scope=tenant)
self.assertEqual(tenant_service1.settings.certifications.count(), 0)
self.assertEqual(tenant_service2.settings.certifications.count(), 0)
new_certification = structure_factories.ServiceCertificationFactory()
tenant.service_project_link.service.settings.certifications.add(new_certification)
self.assertTrue(tenant_service1.settings.certifications.filter(pk__in=[new_certification.pk]).exists())
self.assertTrue(tenant_service2.settings.certifications.filter(pk__in=[new_certification.pk]).exists())
def test_openstack_tenant_service_certifications_are_removed_if_tenant_settings_certifications_are_removed(self):
tenant = openstack_factories.TenantFactory()
tenant_service = factories.OpenStackTenantServiceFactory(settings__scope=tenant)
new_certification = structure_factories.ServiceCertificationFactory()
tenant.service_project_link.service.settings.certifications.add(new_certification)
self.assertEqual(tenant_service.settings.certifications.count(), 1)
tenant.service_project_link.service.settings.certifications.clear()
self.assertEqual(tenant.service_project_link.service.settings.certifications.count(), 0)
self.assertEquals(tenant_service.settings.certifications.count(), 0)
class CopyCertificationsTest(TestCase):
def test_openstack_tenant_settings_certifications_are_copied_from_openstack_settings(self):
tenant = openstack_factories.TenantFactory()
certifications = structure_factories.ServiceCertificationFactory.create_batch(2)
tenant.service_project_link.service.settings.certifications.add(*certifications)
settings = factories.OpenStackTenantServiceSettingsFactory(scope=tenant)
certifications_pk = [c.pk for c in certifications]
self.assertEqual(settings.certifications.filter(pk__in=certifications_pk).count(), 2)
def test_openstack_tenant_settings_certifications_are_not_copied_on_update(self):
tenant = openstack_factories.TenantFactory()
certification = structure_factories.ServiceCertificationFactory()
tenant.service_project_link.service.settings.certifications.add(certification)
settings = factories.OpenStackTenantServiceSettingsFactory(scope=tenant)
self.assertEquals(settings.certifications.count(), 1)
settings.name = 'new_name'
settings.save()
self.assertEquals(settings.certifications.count(), 1)
self.assertEquals(settings.certifications.first().pk, certification.pk)
def test_openstack_tenant_settings_certifications_are_not_copied_if_scope_is_not_tenant(self):
instance = factories.InstanceFactory()
certification = structure_factories.ServiceCertificationFactory()
instance.service_project_link.service.settings.certifications.add(certification)
settings = factories.OpenStackTenantServiceSettingsFactory(scope=instance)
self.assertFalse(settings.certifications.exists())
def test_openstack_tenant_settings_certifications_are_not_copied_if_scope_is_None(self):
settings = factories.OpenStackTenantServiceSettingsFactory(scope=None)
self.assertFalse(settings.certifications.exists())
class CreateServiceFromTenantTest(TestCase):
def test_service_is_created_on_tenant_creation(self):
tenant = openstack_factories.TenantFactory()
self.assertTrue(structure_models.ServiceSettings.objects.filter(scope=tenant).exists())
service_settings = structure_models.ServiceSettings.objects.get(
scope=tenant,
type=apps.OpenStackTenantConfig.service_name,
)
self.assertEquals(service_settings.name, tenant.name)
self.assertEquals(service_settings.customer, tenant.service_project_link.project.customer)
self.assertEquals(service_settings.username, tenant.user_username)
self.assertEquals(service_settings.password, tenant.user_password)
self.assertEquals(service_settings.domain, tenant.service_project_link.service.settings.domain)
self.assertEquals(service_settings.backend_url, tenant.service_project_link.service.settings.backend_url)
self.assertEquals(service_settings.type, apps.OpenStackTenantConfig.service_name)
self.assertEquals(service_settings.options['tenant_id'], tenant.backend_id)
self.assertEquals(service_settings.options['availability_zone'], tenant.availability_zone)
self.assertTrue(models.OpenStackTenantService.objects.filter(
settings=service_settings,
customer=tenant.service_project_link.project.customer
).exists())
service = models.OpenStackTenantService.objects.get(
settings=service_settings,
customer=tenant.service_project_link.project.customer,
)
self.assertTrue(models.OpenStackTenantServiceProjectLink.objects.filter(
service=service,
project=tenant.service_project_link.project,
).exists())
class FlavorPriceListItemTest(TestCase):
def setUp(self):
self.flavor = factories.FlavorFactory()
self.content_type = ContentType.objects.get_for_model(models.Instance)
def test_price_list_item_is_created_on_flavor_creation(self):
cost_tracking_models.DefaultPriceListItem.objects.get(
resource_content_type=self.content_type,
item_type=PriceItemTypes.FLAVOR,
key=self.flavor.name,
)
| mit | 4,625,497,592,013,013,000 | 42.900485 | 119 | 0.707856 | false |
MayankGo/ec2-api | ec2api/config.py | 1 | 2017 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_db import options
from oslo_log import log
from ec2api import paths
from ec2api import version
CONF = cfg.CONF
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite')
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
'keystonemiddleware=WARN', 'routes.middleware=WARN',
'stevedore=WARN', 'glanceclient=WARN']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(levelname)s %(name)s [%(request_id)s '
'%(user_identity)s] %(instance)s'
'%(message)s')
def parse_args(argv, default_config_files=None):
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.register_options(CONF)
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='ec2api.sqlite')
cfg.CONF(argv[1:],
project='ec2api',
version=version.version_info.version_string(),
default_config_files=default_config_files)
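# Typical use (illustrative, mirroring other OpenStack services): a service
# entry point calls parse_args(sys.argv) once at startup, after which cfg.CONF
# holds the merged default, config-file and command-line values.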
| apache-2.0 | 7,144,816,583,722,957,000 | 39.34 | 78 | 0.631135 | false |
jaredkoontz/leetcode | Python/design-snake-game.py | 3 | 2231 | # Time: O(1) per move
# Space: O(s), s is the current length of the snake.
from collections import defaultdict, deque
class SnakeGame(object):
def __init__(self, width,height,food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.__width = width
self.__height = height
self.__score = 0
self.__food = deque(food)
self.__snake = deque([(0, 0)])
self.__direction = {"U":(-1, 0), "L":(0, -1), "R":(0, 1), "D":(1, 0)};
self.__lookup = collections.defaultdict(int)
self.__lookup[(0, 0)] += 1
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
def valid(x, y):
return 0 <= x < self.__height and \
0 <= y < self.__width and \
(x, y) not in self.__lookup
d = self.__direction[direction]
x, y = self.__snake[-1][0] + d[0], self.__snake[-1][1] + d[1]
tail = self.__snake[-1]
self.__lookup[self.__snake[0]] -= 1
if self.__lookup[self.__snake[0]] == 0:
self.__lookup.pop(self.__snake[0])
self.__snake.popleft()
if not valid(x, y):
return -1
elif self.__food and (self.__food[0][0], self.__food[0][1]) == (x, y):
self.__score += 1
self.__food.popleft()
self.__snake.appendleft(tail)
self.__lookup[tail] += 1
self.__snake += (x, y),
self.__lookup[(x, y)] += 1
return self.__score
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
| mit | -5,936,794,719,119,591,000 | 34.412698 | 102 | 0.50874 | false |
mikebenfield/scipy | scipy/fftpack/tests/test_import.py | 49 | 1352 | """Test possibility of patching fftpack with pyfftw.
No module source outside of scipy.fftpack should contain an import of
the form `from scipy.fftpack import ...`, so that a simple replacement
of scipy.fftpack by the corresponding fftw interface completely swaps
the two FFT implementations.
Because this simply inspects source files, we only need to run the test
on one version of Python.
"""
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import re
import tokenize
from numpy.testing import TestCase, assert_, run_module_suite
import scipy
class TestFFTPackImport(TestCase):
def test_fftpack_import(self):
base = Path(scipy.__file__).parent
regexp = r"\s*from.+\.fftpack import .*\n"
for path in base.rglob("*.py"):
if base / "fftpack" in path.parents:
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
assert_(all(not re.fullmatch(regexp, line)
for line in file),
"{0} contains an import from fftpack".format(path))
if __name__ == "__main__":
run_module_suite(argv=sys.argv)
| bsd-3-clause | 7,049,412,135,776,211,000 | 36.555556 | 79 | 0.613166 | false |
watermelo/libcloud | libcloud/test/dns/test_godaddy.py | 14 | 7283 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.drivers.godaddy import GoDaddyDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_GODADDY
from libcloud.dns.base import Zone, RecordType
class GoDaddyTests(unittest.TestCase):
def setUp(self):
GoDaddyMockHttp.type = None
GoDaddyDNSDriver.connectionCls.conn_classes = (
None, GoDaddyMockHttp)
self.driver = GoDaddyDNSDriver(*DNS_PARAMS_GODADDY)
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_zones(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 5)
self.assertEqual(zones[0].id, '177184419')
self.assertEqual(zones[0].domain, 'aperture-platform.com')
def test_ex_check_availability(self):
check = self.driver.ex_check_availability("wazzlewobbleflooble.com")
self.assertEqual(check.available, True)
self.assertEqual(check.price, 14.99)
def test_ex_list_tlds(self):
tlds = self.driver.ex_list_tlds()
self.assertEqual(len(tlds), 331)
self.assertEqual(tlds[0].name, 'academy')
self.assertEqual(tlds[0].type, 'GENERIC')
def test_ex_get_purchase_schema(self):
schema = self.driver.ex_get_purchase_schema('com')
self.assertEqual(schema['id'],
'https://api.godaddy.com/DomainPurchase#')
def test_ex_get_agreements(self):
ags = self.driver.ex_get_agreements('com')
self.assertEqual(len(ags), 1)
self.assertEqual(ags[0].title, 'Domain Name Registration Agreement')
def test_ex_purchase_domain(self):
fixtures = DNSFileFixtures('godaddy')
document = fixtures.load('purchase_request.json')
order = self.driver.ex_purchase_domain(document)
self.assertEqual(order.order_id, 1)
def test_list_records(self):
zone = Zone(id='177184419',
domain='aperture-platform.com',
type='master',
ttl=None,
driver=self.driver)
records = self.driver.list_records(zone)
self.assertEqual(len(records), 14)
self.assertEqual(records[0].type, RecordType.A)
self.assertEqual(records[0].name, '@')
self.assertEqual(records[0].data, '50.63.202.42')
self.assertEqual(records[0].id, '@:A')
def test_get_record(self):
record = self.driver.get_record(
'aperture-platform.com',
'www:A')
self.assertEqual(record.id, 'www:A')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '50.63.202.42')
def test_create_record(self):
zone = Zone(id='177184419',
domain='aperture-platform.com',
type='master',
ttl=None,
driver=self.driver)
record = self.driver.create_record(
zone=zone,
name='www',
type=RecordType.A,
data='50.63.202.42'
)
self.assertEqual(record.id, 'www:A')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '50.63.202.42')
def test_update_record(self):
record = self.driver.get_record(
'aperture-platform.com',
'www:A')
record = self.driver.update_record(
record=record,
name='www',
type=RecordType.A,
data='50.63.202.22'
)
self.assertEqual(record.id, 'www:A')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '50.63.202.22')
def test_get_zone(self):
zone = self.driver.get_zone('aperture-platform.com')
self.assertEqual(zone.id, '177184419')
self.assertEqual(zone.domain, 'aperture-platform.com')
def test_delete_zone(self):
zone = Zone(id='177184419',
domain='aperture-platform.com',
type='master',
ttl=None,
driver=self.driver)
self.driver.delete_zone(zone)
class GoDaddyMockHttp(MockHttp):
fixtures = DNSFileFixtures('godaddy')
def _v1_domains(self, method, url, body, headers):
body = self.fixtures.load('v1_domains.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_aperture_platform_com(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_aperture_platform_com.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_aperture_platform_com_records(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_aperture_platform_com_records.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_available(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_available.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_tlds(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_tlds.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_aperture_platform_com_records_A_www(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_aperture_platform_com_records_A_www.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_purchase_schema_com(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_purchase_schema_com.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_agreements(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_agreements.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_domains_purchase(self, method, url, body, headers):
body = self.fixtures.load('v1_domains_purchase.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 1,445,131,565,873,339,000 | 39.237569 | 90 | 0.636825 | false |
helinwang/Paddle | paddle/api/test/util.py | 20 | 1752 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
from py_paddle import swig_paddle
def doubleEqual(a, b):
return abs(a - b) < 1e-5
def __readFromFile():
for i in xrange(10002):
label = np.random.randint(0, 9)
sample = np.random.rand(784) + 0.1 * label
yield sample, label
def loadMNISTTrainData(batch_size=100):
if not hasattr(loadMNISTTrainData, "gen"):
generator = __readFromFile()
loadMNISTTrainData.gen = generator
else:
generator = loadMNISTTrainData.gen
args = swig_paddle.Arguments.createArguments(2)
# batch_size = 100
dense_slot = []
id_slot = []
atEnd = False
for _ in xrange(batch_size):
try:
result = generator.next()
dense_slot.extend(result[0])
id_slot.append(result[1])
except StopIteration:
atEnd = True
del loadMNISTTrainData.gen
break
dense_slot = swig_paddle.Matrix.createDense(dense_slot, batch_size, 784)
id_slot = swig_paddle.IVector.create(id_slot)
args.setSlotValue(0, dense_slot)
args.setSlotIds(1, id_slot)
return args, atEnd
| apache-2.0 | 6,792,482,890,558,770,000 | 28.694915 | 76 | 0.667808 | false |
hmendozap/master-arbeit-projects | autosk_dev_test/component/RegDeepNet.py | 1 | 19049 | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class RegDeepNet(AutoSklearnRegressionAlgorithm):
def __init__(self, number_epochs, batch_size, num_layers,
dropout_output, learning_rate, solver,
lambda2, random_state=None,
**kwargs):
self.number_epochs = number_epochs
self.batch_size = batch_size
self.num_layers = ord(num_layers) - ord('a')
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lambda2 = lambda2
self.solver = solver
# Also taken from **kwargs. Because the assigned
# arguments are the minimum parameters to run
# the iterative net. IMO.
self.lr_policy = kwargs.get("lr_policy", "fixed")
self.momentum = kwargs.get("momentum", 0.99)
self.beta1 = 1 - kwargs.get("beta1", 0.1)
self.beta2 = 1 - kwargs.get("beta2", 0.01)
self.rho = kwargs.get("rho", 0.95)
self.gamma = kwargs.get("gamma", 0.01)
self.power = kwargs.get("power", 1.0)
self.epoch_step = kwargs.get("epoch_step", 1)
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isbinary = False
self.m_ismultilabel = False
self.m_isregression = True
# TODO: Should one add a try-except here?
self.num_units_per_layer = []
self.dropout_per_layer = []
self.activation_per_layer = []
self.weight_init_layer = []
self.std_per_layer = []
self.leakiness_per_layer = []
self.tanh_alpha_per_layer = []
self.tanh_beta_per_layer = []
for i in range(1, self.num_layers):
self.num_units_per_layer.append(int(kwargs.get("num_units_layer_" + str(i), 128)))
self.dropout_per_layer.append(float(kwargs.get("dropout_layer_" + str(i), 0.5)))
self.activation_per_layer.append(kwargs.get("activation_layer_" + str(i), 'relu'))
self.weight_init_layer.append(kwargs.get("weight_init_" + str(i), 'he_normal'))
self.std_per_layer.append(float(kwargs.get("std_layer_" + str(i), 0.005)))
self.leakiness_per_layer.append(float(kwargs.get("leakiness_layer_" + str(i), 1. / 3.)))
self.tanh_alpha_per_layer.append(float(kwargs.get("tanh_alpha_layer_" + str(i), 2. / 3.)))
self.tanh_beta_per_layer.append(float(kwargs.get("tanh_beta_layer_" + str(i), 1.7159)))
self.estimator = None
self.random_state = random_state
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
assert len(self.num_units_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
assert len(self.dropout_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
from implementation import FeedForwardNet
self.estimator = FeedForwardNet.FeedForwardNet(batch_size=self.batch_size,
input_shape=self.input_shape,
num_layers=self.num_layers,
num_units_per_layer=self.num_units_per_layer,
dropout_per_layer=self.dropout_per_layer,
activation_per_layer=self.activation_per_layer,
weight_init_per_layer=self.weight_init_layer,
std_per_layer=self.std_per_layer,
leakiness_per_layer=self.leakiness_per_layer,
tanh_alpha_per_layer=self.tanh_alpha_per_layer,
tanh_beta_per_layer=self.tanh_beta_per_layer,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=self.number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression,
random_state=self.random_state)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'reg_feed_nn',
'name': 'Regression Feed Forward Neural Network',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
max_num_layers = 7 # Maximum number of layers coded
# Hacky way to condition layers params based on the number of layers
# 'c'=1, 'd'=2, 'e'=3 ,'f'=4', g ='5', h='6' + output_layer
layer_choices = [chr(i) for i in range(ord('c'), ord('b') + max_num_layers)]
batch_size = UniformIntegerHyperparameter("batch_size",
32, 4096,
log=True,
default=32)
number_epochs = UniformIntegerHyperparameter("number_epochs",
2, 80,
default=5)
num_layers = CategoricalHyperparameter("num_layers",
choices=layer_choices,
default='c')
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 1.0,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-7, 1e-2,
log=True,
default=1e-4)
dropout_output = UniformFloatHyperparameter("dropout_output",
0.0, 0.99,
default=0.5)
# Define basic hyperparameters and define the config space
# basic means that are independent from the number of layers
cs = ConfigurationSpace()
cs.add_hyperparameter(number_epochs)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(num_layers)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(dropout_output)
# Define parameters with different child parameters and conditions
solver_choices = ["adam", "adadelta", "adagrad",
"sgd", "momentum", "nesterov",
"smorm3s"]
solver = CategoricalHyperparameter(name="solver",
choices=solver_choices,
default="smorm3s")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
rho = UniformFloatHyperparameter("rho", 0.05, 0.99,
log=True,
default=0.95)
momentum = UniformFloatHyperparameter("momentum", 0.3, 0.999,
default=0.9)
# TODO: Add policy based on this sklearn sgd
policy_choices = ['fixed', 'inv', 'exp', 'step']
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(momentum)
cs.add_hyperparameter(rho)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
# Define parameters that are needed it for each layer
output_activation_choices = ['softmax', 'sigmoid', 'softplus', 'tanh']
activations_choices = ['sigmoid', 'tanh', 'scaledTanh', 'elu', 'relu', 'leaky', 'linear']
weight_choices = ['constant', 'normal', 'uniform',
'glorot_normal', 'glorot_uniform',
'he_normal', 'he_uniform',
'ortogonal', 'sparse']
# Iterate over parameters that are used in each layer
for i in range(1, max_num_layers):
layer_units = UniformIntegerHyperparameter("num_units_layer_" + str(i),
64, 4096,
log=True,
default=128)
cs.add_hyperparameter(layer_units)
layer_dropout = UniformFloatHyperparameter("dropout_layer_" + str(i),
0.0, 0.99,
default=0.5)
cs.add_hyperparameter(layer_dropout)
weight_initialization = CategoricalHyperparameter('weight_init_' + str(i),
choices=weight_choices,
default='he_normal')
cs.add_hyperparameter(weight_initialization)
layer_std = UniformFloatHyperparameter("std_layer_" + str(i),
1e-6, 0.1,
log=True,
default=0.005)
cs.add_hyperparameter(layer_std)
layer_activation = CategoricalHyperparameter("activation_layer_" + str(i),
choices=activations_choices,
default="relu")
cs.add_hyperparameter(layer_activation)
layer_leakiness = UniformFloatHyperparameter('leakiness_layer_' + str(i),
0.01, 0.99,
default=0.3)
cs.add_hyperparameter(layer_leakiness)
layer_tanh_alpha = UniformFloatHyperparameter('tanh_alpha_layer_' + str(i),
0.5, 1.0,
default=2. / 3.)
cs.add_hyperparameter(layer_tanh_alpha)
layer_tanh_beta = UniformFloatHyperparameter('tanh_beta_layer_' + str(i),
1.1, 3.0,
log=True,
default=1.7159)
cs.add_hyperparameter(layer_tanh_beta)
# TODO: Could be in a function in a new module
for i in range(2, max_num_layers):
# Condition layers parameter on layer choice
layer_unit_param = cs.get_hyperparameter("num_units_layer_" + str(i))
layer_cond = InCondition(child=layer_unit_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition dropout parameter on layer choice
layer_dropout_param = cs.get_hyperparameter("dropout_layer_" + str(i))
layer_cond = InCondition(child=layer_dropout_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition weight initialization on layer choice
layer_weight_param = cs.get_hyperparameter("weight_init_" + str(i))
layer_cond = InCondition(child=layer_weight_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition std parameter on weight layer initialization choice
layer_std_param = cs.get_hyperparameter("std_layer_" + str(i))
weight_cond = EqualsCondition(child=layer_std_param,
parent=layer_weight_param,
value='normal')
cs.add_condition(weight_cond)
# Condition activation parameter on layer choice
layer_activation_param = cs.get_hyperparameter("activation_layer_" + str(i))
layer_cond = InCondition(child=layer_activation_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition leakiness on activation choice
layer_leakiness_param = cs.get_hyperparameter("leakiness_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_leakiness_param,
parent=layer_activation_param,
value='leaky')
cs.add_condition(activation_cond)
# Condition tanh on activation choice
layer_tanh_alpha_param = cs.get_hyperparameter("tanh_alpha_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_alpha_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
layer_tanh_beta_param = cs.get_hyperparameter("tanh_beta_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_beta_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
# Conditioning on solver
momentum_depends_on_solver = InCondition(momentum, solver,
values=["momentum", "nesterov"])
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
rho_depends_on_solver = EqualsCondition(rho, solver, "adadelta")
cs.add_condition(momentum_depends_on_solver)
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(rho_depends_on_solver)
# Conditioning on learning rate policy
lr_policy_depends_on_solver = InCondition(lr_policy, solver,
["adadelta", "adagrad", "sgd",
"momentum", "nesterov"])
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=["inv", "exp", "step"])
power_depends_on_policy = EqualsCondition(power, lr_policy, "inv")
epoch_step_depends_on_policy = EqualsCondition(epoch_step, lr_policy, "step")
cs.add_condition(lr_policy_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
| mit | -3,206,617,817,468,268,500 | 50.763587 | 102 | 0.484435 | false |
RohitDas/cubeproject | lib/django/utils/ipv6.py | 26 | 7971 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address, or the same value
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| bsd-3-clause | 4,517,284,391,431,646,000 | 28.413284 | 75 | 0.585874 | false |
simplyvikram/google-chartwrapper | templating/djangoproj/views.py | 9 | 1670 | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
def example(request):
greek_elections = [
{ 'type': 'Pie3D',
'title': 'Greek Elections 2009',
'data': [43.92, 33.48, 7.54, 5.63, 4.60, 2.53, 2.3],
'labels': 'ΠΑΣΟΚ|ΝΔ|ΚΚΕ|ΛΑΟΣ|ΣΥΡΙΖΑ|Οικολόγοι Πράσινοι|Λοιποί',
'colors': '0ab927|005ac0|ff0000|100077|ffd000|99cc33|888888'
},
{ 'type': 'Pie3D',
'title': 'Greek Elections 2007',
'data': [41.83, 38.10, 8.15, 5.04, 3.80, 1.05, 2.03],
'labels': 'ΝΔ|ΠΑΣΟΚ|ΚΚΕ|ΣΥΡΙΖΑ|ΛΑΟΣ|Οικολόγοι Πράσινοι|Λοιποί',
'colors': '005ac0|0ab927|ff0000|ffd000|100077|99cc33|888888'
},
{ 'type': 'Pie3D',
'title': 'Greek Elections 2004',
'data': [45.4, 40.5, 5.9, 3.3, 2.2, 1.8, 0.9],
'labels': 'ΝΔ|ΠΑΣΟΚ|ΚΚΕ|ΣΥΡΙΖΑ|ΛΑΟΣ|ΔΗΚΚΙ|Λοιποί',
'colors': '005ac0|0ab927|ff0000|ffd000|100077|ff7f00|888888'
}
]
for g in greek_elections:
g['legend'] = map(unicode, g['data'])
return render_to_response('example.html',{
'venndata': [100,80,60,30,30,30,10],
'piedata':[60,40],
'bhgdata':['el','or'],
'20q': ['Animals','Vegetables','Minerals'],
'qrstr':'''To the human eye QR Codes look like hieroglyphics,
but they can be read by any device that has
the appropriate software installed.''',
'temps':'max 25°|min 15°',
'elections': greek_elections
})
| bsd-3-clause | 8,264,279,140,925,557,000 | 37.775 | 75 | 0.533204 | false |
tiagocardosos/stoq | stoqlib/domain/parameter.py | 2 | 2126 | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, http://www.gnu.org/
##
## Author(s): Stoq Team <[email protected]>
##
##
""" Domain classes for handling parameters """
# pylint: enable=E1101
from stoqlib.database.properties import BoolCol, UnicodeCol
from stoqlib.domain.base import Domain
from stoqlib.lib.translation import stoqlib_gettext as _
class ParameterData(Domain):
""" Class to store system parameters.
See also:
`schema <http://doc.stoq.com.br/schema/tables/parameter_data.html>`__
"""
__storm_table__ = 'parameter_data'
#: name of the parameter we want to query on
field_name = UnicodeCol()
#: current result(or value) of this parameter
field_value = UnicodeCol()
#: the item can't be edited through an editor.
is_editable = BoolCol()
def get_group(self):
from stoqlib.lib.parameters import sysparam
return sysparam.get_detail_by_name(self.field_name).group
def get_short_description(self):
from stoqlib.lib.parameters import sysparam
return sysparam.get_detail_by_name(self.field_name).short_desc
def get_field_value(self):
# FIXME: This is a workaround to handle some parameters which are
# locale specific.
if self.field_value:
return _(self.field_value)
return self.field_value
| gpl-2.0 | 3,888,851,263,437,780,500 | 32.21875 | 78 | 0.697554 | false |
develru/InformationDevicePy | modules/weatherdata.py | 1 | 4214 |
"""
Copyright (C) 2016 Richard Schwalk
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from enum import Enum, unique
from PyQt5.QtCore import Qt, QAbstractListModel, QObject, QVariant, QDateTime
@unique
class RoleNames(Enum):
TempRole = Qt.UserRole
DescriptionRole = Qt.UserRole + 1
TimeRole = Qt.UserRole + 2
IconRole = Qt.UserRole + 3
class ForecastDataModel(QAbstractListModel, QObject):
"""Docstring for ForecastDataModel. """
def __init__(self, parent=None):
super(ForecastDataModel, self).__init__(parent)
self._role_names = {
RoleNames.TempRole.value: b'temp',
RoleNames.DescriptionRole.value: b'description',
RoleNames.TimeRole.value: b'time',
RoleNames.IconRole.value: b'icon',
}
self._data = []
def rowCount(self, parent=None, *args, **kwargs):
return len(self._data)
def data(self, QModelIndex, role=None):
row = QModelIndex.row()
if row < 0 or row >= len(self._data):
return QVariant()
if role == RoleNames.IconRole.value:
return self._data[row].icon
elif role == RoleNames.TempRole.value:
return ForecastDataModel.format_temp(self._data[row])
elif role == RoleNames.DescriptionRole.value:
return self._data[row].description
elif role == RoleNames.TimeRole.value:
return ForecastDataModel.format_time(self._data[row])
return QVariant()
def set_all_data(self, data):
self.beginResetModel()
self._data.clear()
self._data = data
self.endResetModel()
@staticmethod
def format_temp(weather):
return '{0} °C / {1} °C'.format(weather.temp_max, weather.temp_min)
@staticmethod
def format_time(weather):
dt = QDateTime.fromTime_t(weather.time)
return dt.toString('dddd')
def roleNames(self):
return self._role_names
class BaseWeatherData:
def __init__(self):
self._description = ''
self._icon = ''
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, value):
self._icon = value
class CurrentWeatherData(BaseWeatherData):
def __init__(self):
super(CurrentWeatherData, self).__init__()
self._temperature = 0
self._location = ''
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, value):
self._temperature = value
@property
def location_name(self):
return self._location
@location_name.setter
def location_name(self, value):
self._location = value
class WeatherForecastData(BaseWeatherData):
"""Docstring for WeatherForecastData. """
def __init__(self):
"""TODO: to be defined1. """
super(WeatherForecastData, self).__init__()
self._temp_min = 0
self._temp_max = 0
self._time = 0
@property
def temp_min(self):
return self._temp_min
@temp_min.setter
def temp_min(self, value):
self._temp_min = value
@property
def temp_max(self):
return self._temp_max
@temp_max.setter
def temp_max(self, value):
self._temp_max = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
self._time = value
| gpl-3.0 | -1,921,819,125,867,300,000 | 25.658228 | 77 | 0.624881 | false |
tdubourg/downsizing-game | transactions.py | 1 | 10019 | from enum import Enum
from utils import i, d
Resources = Enum("CASH", "VOTE", "TRUST")
class AbstractTransaction(object):
"""Transaction interface"""
last_id = -1
player_1 = None
player_2 = None
@staticmethod
def next_id():
# @TODO thread safety?
AbstractTransaction.last_id += 1
return AbstractTransaction.last_id
def __init__(self):
super(AbstractTransaction, self).__init__()
self._id = AbstractTransaction.next_id()
def is_valid(self, judge):
"""
Is the transaction valid?
:return (bool, AbstractTransaction)
(True, CopyOfTheTransaction) if the transaction is valid
(False, None) if the transaction is invalid
The copy of the transaction is to be used to avoid the transaction being modified between validation
and application
This is a non-abstract method
"""
return self._is_valid(judge)
def apply(self, judge):
"""
Apply the transaction to the players' resources
Abstract method. Has to be overridden by children
"""
raise NotImplementedError()
def clone(self):
"""
Returns a clone of current object. A clone is a 1-to-1 copy
of the current object.
Abstract method
"""
raise NotImplementedError()
def _is_valid(self, judge):
"""
Internal use only. Validates in-place the current transaction
This is not a private method, but a protected abstract one.
It has to be implemented by children, but will be called
by parent's is_valid() method unless is_valid() is overridden
"""
raise NotImplementedError()
def __str__(self):
return "Transaction, id=" + str(self._id)
def get_data(self):
return self.__dict__
class UnidirectionalTransaction(AbstractTransaction):
"""
UnidirectionalTransaction are IMMEDIATE and unidirectional (transfer is done from one player to the other,
no payback).
"""
def __init__(self, player_from, player_to, resource_type, amount):
"""
:param player_from: int
:param player_to: int
:param resource_type: Resource
:param amount: int
"""
super(UnidirectionalTransaction, self).__init__()
self.player_from = player_from
self.player_to = player_to
# Just so that we respect the interface:
self.player_1 = self.player_from
self.player_2 = self.player_to
self.resource_type = resource_type
try:
self.amount = int(amount)
except ValueError:
self.amount = -1 # So that the transaction is invalid
def _is_valid(self, judge):
if self.resource_type not in Resources:
d("Invalid resource type")
return False
if self.amount < 0:
d("Invalid amount")
return False
if not judge.is_valid_player(self.player_from) or not judge.is_valid_player(self.player_to):
d("Invalid player_from or player_to")
return False
if not judge.has_enough_resource(self.player_from, self.resource_type, self.amount):
d("player_from does not have enough resources to pay.")
return False
return True
def apply(self, judge):
i("Transaction", self._id, "is being applied.")
if not judge.clock.has_still(1):
raise Exception("Not enough remaining rounds")
return False
players_resources = judge.game.players_resources
players_resources[self.player_from][self.resource_type] -= self.amount
players_resources[self.player_to][self.resource_type] += self.amount
judge.current_player_transactions += 1
judge.clock.tick()
def clone(self):
return UnidirectionalTransaction(
self.player_from,
self.player_to,
self.resource_type,
self.amount
)
def __str__(self):
return \
super(UnidirectionalTransaction, self).__str__() \
+ "\n\t\t\tdirection=Unidirectional" \
+ "\n\t\t\tplayer_from=" + str(self.player_from) \
+ "\n\t\t\tplayer_to=" + str(self.player_to) \
+ "\n\t\t\tresource_type=" + str(self.resource_type) \
+ "\n\t\t\tamount=" + str(self.amount)
class BidirectionalTransaction(AbstractTransaction):
"""
BidirectionalTransaction are immediate bidirectional transactions. It models a "trade" where
there is a transfer of resources from a player to the other and the other pays this resources
using another resource and thus making a transfer as well.
"""
def __init__(self, player_1, player_2, rtype_1to2, amount_1to2, rtype_2to1, amount_2to1):
super(BidirectionalTransaction, self).__init__()
self.transaction_1to2 = UnidirectionalTransaction(player_1, player_2, rtype_1to2, amount_1to2)
self.transaction_2to1 = UnidirectionalTransaction(player_2, player_1, rtype_2to1, amount_2to1)
# To respect the interface
self.player_1 = player_1
self.player_2 = player_2
def _is_valid(self, judge):
# Note: We already recreated the unidirectional internal transactions so we use the no-copy/in-place
# validation method
return self.transaction_1to2._is_valid(judge) and self.transaction_2to1._is_valid(judge)
def apply(self, judge):
if not judge.clock.has_still(1):
raise Exception("Not enough remaining rounds")
return False
self.transaction_1to2.apply(judge)
self.transaction_2to1.apply(judge)
def clone(self):
return BidirectionalTransaction(
self.transaction_1to2.player_from,
self.transaction_1to2.player_to,
self.transaction_1to2.resource_type,
self.transaction_1to2.amount,
self.transaction_2to1.resource_type,
self.transaction_2to1.amount
)
def __str__(self):
return \
super(BidirectionalTransaction, self).__str__() \
+ "\n\t\tdirection=Bidirectional" \
+ "\n\t\ttransaction_1to2=" + str(self.transaction_1to2) \
+ "\n\t\ttransaction_2to1=" + str(self.transaction_2to1)
def get_data(self):
data = dict(self.__dict__)
data['transaction_1to2'] = self.transaction_1to2.get_data()
data['transaction_2to1'] = self.transaction_2to1.get_data()
return data
class ScheduledUnidirectionalTransaction(UnidirectionalTransaction):
"""
A ScheduledUnidirectionalTransaction is a scheduled transaction, that is unidirectional...
"""
def __init__(self, player_from, player_to, resource_type, amount, deadline):
self._deadline = deadline
super(ScheduledUnidirectionalTransaction, self).__init__(player_from, player_to, resource_type, amount)
def is_valid(self, judge):
# First, execute parent's checks
if not super(ScheduledUnidirectionalTransaction, self).is_valid(judge):
return False
# If nothing went wrong, execute additional checks
# We are going to check that the player can indeed play before the round it specifiedclass ScheduledUnidirectionalTransaction(UnidirectionalTransaction):
return judge.is_valid_delay()
def clone(self):
return ScheduledUnidirectionalTransaction(
self.player_from,
self.player_to,
self.resource_type,
self.amount,
self._deadline
)
def __str__(self):
return \
super(ScheduledUnidirectionalTransaction, self).__str__() \
+ "\n\t\t\tdeadline=" + str(self._deadline)
class ScheduledBidirectionalTransaction(BidirectionalTransaction):
"""
A ScheduledBidirectionalTransaction is a transaction that contains at least one
ScheduledUnidirectionalTransaction
"""
def __init__(self, player_1, player_2, rtype_1to2, amount_1to2, deadline_1to2, rtype_2to1, amount_2to1, deadline_2to1):
if deadline_1to2 is None and deadline_2to1 is None:
raise ValueError("At least one of the deadlines should not be None. At least one of the transactions have to be scheduled")
super(ScheduledBidirectionalTransaction, self).__init__(player_1, player_2, rtype_1to2, amount_1to2, rtype_2to1, amount_2to1)
if deadline_1to2 is not None:
self.transaction_1to2 = ScheduledUnidirectionalTransaction(player_1, player_2, rtype_1to2, amount_1to2, deadline_1to2)
else:
self.transaction_1to2 = UnidirectionalTransaction(player_1, player_2, rtype_1to2, amount_1to2)
if deadline_2to1 is not None:
self.transaction_2to1 = ScheduledUnidirectionalTransaction(player_2, player_1, rtype_2to1, amount_2to1, deadline_2to1)
else:
self.transaction_2to1 = UnidirectionalTransaction(player_2, player_1, rtype_2to1, amount_2to1)
def is_valid(self, judge):
# First, execute parent's checks
if not super(ScheduledBidirectionalTransaction, self).is_valid(judge):
return False
return True
def clone(self):
return ScheduledBidirectionalTransaction(
self.transaction_1to2.player_from,
self.transaction_1to2.player_to,
self.transaction_1to2.resource_type,
self.transaction_1to2.amount,
self.transaction_1to2._deadline \
if isinstance(self.transaction_1to2, ScheduledUnidirectionalTransaction) \
else None,
self.transaction_2to1.resource_type,
self.transaction_2to1.amount,
self.transaction_2to1._deadline \
if isinstance(self.transaction_2to1, ScheduledUnidirectionalTransaction) \
else None,
)
| lgpl-3.0 | -4,558,418,088,270,029,000 | 38.916335 | 161 | 0.628007 | false |
derekgreene/topic-stability | unsupervised/hungarian.py | 5 | 19460 | #!/usr/bin/python
"""
Implementation of the Hungarian (Munkres) Algorithm using Python and NumPy
References: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf
http://weber.ucsd.edu/~vcrawfor/hungar.pdf
http://en.wikipedia.org/wiki/Hungarian_algorithm
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
http://www.clapper.org/software/python/munkres/
"""
# Module Information.
__version__ = "1.1.1"
__author__ = "Thom Dedecko"
__url__ = "http://github.com/tdedecko/hungarian-algorithm"
__copyright__ = "(c) 2010 Thom Dedecko"
__license__ = "MIT License"
class HungarianError(Exception):
pass
# Import numpy. Error if fails
try:
import numpy as np
except ImportError:
raise HungarianError("NumPy is not installed.")
class Hungarian:
"""
    Implementation of the Hungarian (Munkres) Algorithm using NumPy.
Usage:
hungarian = Hungarian(cost_matrix)
hungarian.calculate()
or
hungarian = Hungarian()
hungarian.calculate(cost_matrix)
Handle Profit matrix:
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
or
cost_matrix = Hungarian.make_cost_matrix(profit_matrix)
The matrix will be automatically padded if it is not square.
    For that, NumPy's resize function is used, which automatically adds 0s to any row/column that is added.
Get results and total potential after calculation:
hungarian.get_results()
hungarian.get_total_potential()
"""
def __init__(self, input_matrix=None, is_profit_matrix=False):
"""
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
if input_matrix is not None:
# Save input
my_matrix = np.array(input_matrix)
self._input_matrix = np.array(input_matrix)
self._maxColumn = my_matrix.shape[1]
self._maxRow = my_matrix.shape[0]
# Adds 0s if any columns/rows are added. Otherwise stays unaltered
matrix_size = max(self._maxColumn, self._maxRow)
my_matrix.resize(matrix_size, matrix_size)
# Convert matrix to profit matrix if necessary
if is_profit_matrix:
my_matrix = self.make_cost_matrix(my_matrix)
self._cost_matrix = my_matrix
self._size = len(my_matrix)
self._shape = my_matrix.shape
# Results from algorithm.
self._results = []
self._totalPotential = 0
else:
self._cost_matrix = None
def get_results(self):
"""Get results after calculation."""
return self._results
def get_total_potential(self):
"""Returns expected value after calculation."""
return self._totalPotential
def calculate(self, input_matrix=None, is_profit_matrix=False):
"""
Implementation of the Hungarian (Munkres) Algorithm.
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
# Handle invalid and new matrix inputs.
if input_matrix is None and self._cost_matrix is None:
raise HungarianError("Invalid input")
elif input_matrix is not None:
self.__init__(input_matrix, is_profit_matrix)
result_matrix = self._cost_matrix.copy()
# Step 1: Subtract row mins from each row.
for index, row in enumerate(result_matrix):
result_matrix[index] -= row.min()
# Step 2: Subtract column mins from each column.
for index, column in enumerate(result_matrix.T):
result_matrix[:, index] -= column.min()
# Step 3: Use minimum number of lines to cover all zeros in the matrix.
# If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat.
total_covered = 0
while total_covered < self._size:
# Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns.
cover_zeros = CoverZeros(result_matrix)
covered_rows = cover_zeros.get_covered_rows()
covered_columns = cover_zeros.get_covered_columns()
total_covered = len(covered_rows) + len(covered_columns)
# if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m).
if total_covered < self._size:
result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns)
# Step 4: Starting with the top row, work your way downwards as you make assignments.
# Find single zeros in rows or columns.
# Add them to final result and remove them and their associated row/column from the matrix.
expected_results = min(self._maxColumn, self._maxRow)
zero_locations = (result_matrix == 0)
while len(self._results) != expected_results:
            # If no zeros remain in the matrix before all the results are found, the algorithm has failed.
if not zero_locations.any():
raise HungarianError("Unable to find results. Algorithm has failed.")
# Find results and mark rows and columns for deletion
matched_rows, matched_columns = self.__find_matches(zero_locations)
# Make arbitrary selection
total_matched = len(matched_rows) + len(matched_columns)
if total_matched == 0:
matched_rows, matched_columns = self.select_arbitrary_match(zero_locations)
# Delete rows and columns
for row in matched_rows:
zero_locations[row] = False
for column in matched_columns:
zero_locations[:, column] = False
# Save Results
self.__set_results(zip(matched_rows, matched_columns))
# Calculate total potential
value = 0
for row, column in self._results:
value += self._input_matrix[row, column]
self._totalPotential = value
@staticmethod
def make_cost_matrix(profit_matrix):
"""
Converts a profit matrix into a cost matrix.
Expects NumPy objects as input.
"""
# subtract profit matrix from a matrix made of the max value of the profit matrix
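        # e.g. a profit matrix [[1, 5], [3, 2]] has max 5, so the cost matrix is [[4, 0], [2, 3]]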
matrix_shape = profit_matrix.shape
offset_matrix = np.ones(matrix_shape) * profit_matrix.max()
cost_matrix = offset_matrix - profit_matrix
return cost_matrix
def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns):
"""Subtract m from every uncovered number and add m to every element covered with two lines."""
# Calculate minimum uncovered number (m)
elements = []
for row_index, row in enumerate(result_matrix):
if row_index not in covered_rows:
for index, element in enumerate(row):
if index not in covered_columns:
elements.append(element)
min_uncovered_num = min(elements)
# Add m to every covered element
adjusted_matrix = result_matrix
for row in covered_rows:
adjusted_matrix[row] += min_uncovered_num
for column in covered_columns:
adjusted_matrix[:, column] += min_uncovered_num
# Subtract m from every element
m_matrix = np.ones(self._shape) * min_uncovered_num
adjusted_matrix -= m_matrix
return adjusted_matrix
def __find_matches(self, zero_locations):
"""Returns rows and columns with matches in them."""
marked_rows = np.array([], dtype=int)
marked_columns = np.array([], dtype=int)
# Mark rows and columns with matches
# Iterate over rows
for index, row in enumerate(zero_locations):
row_index = np.array([index])
if np.sum(row) == 1:
column_index, = np.where(row)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
# Iterate over columns
for index, column in enumerate(zero_locations.T):
column_index = np.array([index])
if np.sum(column) == 1:
row_index, = np.where(column)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
return marked_rows, marked_columns
@staticmethod
def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index):
"""Check if column or row is marked. If not marked then mark it."""
new_marked_rows = marked_rows
new_marked_columns = marked_columns
if not (marked_rows == row_index).any() and not (marked_columns == column_index).any():
new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index)
new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index)
return new_marked_rows, new_marked_columns
@staticmethod
def select_arbitrary_match(zero_locations):
"""Selects row column combination with minimum number of zeros in it."""
# Count number of zeros in row and column combinations
rows, columns = np.where(zero_locations)
zero_count = []
for index, row in enumerate(rows):
total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]])
zero_count.append(total_zeros)
# Get the row column combination with the minimum number of zeros.
indices = zero_count.index(min(zero_count))
row = np.array([rows[indices]])
column = np.array([columns[indices]])
return row, column
def __set_results(self, result_lists):
"""Set results during calculation."""
# Check if results values are out of bound from input matrix (because of matrix being padded).
# Add results to results list.
for result in result_lists:
row, column = result
if row < self._maxRow and column < self._maxColumn:
new_result = (int(row), int(column))
self._results.append(new_result)
class CoverZeros:
"""
Use minimum number of lines to cover all zeros in the matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
def __init__(self, matrix):
"""
Input a matrix and save it as a boolean matrix to designate zero locations.
Run calculation procedure to generate results.
"""
# Find zeros in matrix
self._zero_locations = (matrix == 0)
self._shape = matrix.shape
# Choices starts without any choices made.
self._choices = np.zeros(self._shape, dtype=bool)
self._marked_rows = []
self._marked_columns = []
# marks rows and columns
self.__calculate()
# Draw lines through all unmarked rows and all marked columns.
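        # (By König's theorem, this marking yields a minimum line cover of the zeros.)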
self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows))
self._covered_columns = self._marked_columns
def get_covered_rows(self):
"""Return list of covered rows."""
return self._covered_rows
def get_covered_columns(self):
"""Return list of covered columns."""
return self._covered_columns
def __calculate(self):
"""
Calculates minimum number of lines necessary to cover all zeros in a matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
while True:
# Erase all marks.
self._marked_rows = []
self._marked_columns = []
# Mark all rows in which no choice has been made.
for index, row in enumerate(self._choices):
if not row.any():
self._marked_rows.append(index)
# If no marked rows then finish.
if not self._marked_rows:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# While there is some choice in every marked column.
while self.__choice_in_all_marked_columns():
# Some Choice in every marked column.
# Mark all rows not already marked which have choices in marked columns.
num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns()
# If no new marks then Finish.
if num_marked_rows == 0:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# No choice in one or more marked columns.
# Find a marked column that does not have a choice.
choice_column_index = self.__find_marked_column_without_choice()
while choice_column_index is not None:
# Find a zero in the column indexed that does not have a row with a choice.
choice_row_index = self.__find_row_without_choice(choice_column_index)
# Check if an available row was found.
new_choice_column_index = None
if choice_row_index is None:
                # Find a good row to accommodate the swap. Find its column pair.
choice_row_index, new_choice_column_index = \
self.__find_best_choice_row_and_new_column(choice_column_index)
# Delete old choice.
self._choices[choice_row_index, new_choice_column_index] = False
# Set zero to choice.
self._choices[choice_row_index, choice_column_index] = True
# Loop again if choice is added to a row with a choice already in it.
choice_column_index = new_choice_column_index
def __mark_new_columns_with_zeros_in_marked_rows(self):
"""Mark all columns not already marked which have zeros in marked rows."""
num_marked_columns = 0
for index, column in enumerate(self._zero_locations.T):
if index not in self._marked_columns:
if column.any():
row_indices, = np.where(column)
zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])
if zeros_in_marked_rows:
self._marked_columns.append(index)
num_marked_columns += 1
return num_marked_columns
def __mark_new_rows_with_choices_in_marked_columns(self):
"""Mark all rows not already marked which have choices in marked columns."""
num_marked_rows = 0
for index, row in enumerate(self._choices):
if index not in self._marked_rows:
if row.any():
                    # np.where returns an index array; each row of _choices
                    # holds at most one True, so take its first element.
                    column_indices, = np.where(row)
                    if column_indices[0] in self._marked_columns:
self._marked_rows.append(index)
num_marked_rows += 1
return num_marked_rows
def __choice_in_all_marked_columns(self):
"""Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return False
return True
def __find_marked_column_without_choice(self):
"""Find a marked column that does not have a choice."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return column_index
raise HungarianError(
"Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.")
def __find_row_without_choice(self, choice_column_index):
"""Find a row without a choice in it for the column indexed. If a row does not exist then return None."""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
if not self._choices[row_index].any():
return row_index
# All rows have choices. Return None.
return None
def __find_best_choice_row_and_new_column(self, choice_column_index):
"""
Find a row index to use for the choice so that the column that needs to be changed is optimal.
Return a random row and column if unable to find an optimal selection.
"""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
column_indices, = np.where(self._choices[row_index])
column_index = column_indices[0]
if self.__find_row_without_choice(column_index) is not None:
return row_index, column_index
# Cannot find optimal row and column. Return a random row and column.
from random import shuffle
shuffle(row_indices)
column_index, = np.where(self._choices[row_indices[0]])
return row_indices[0], column_index[0]
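# A minimal sketch of driving CoverZeros directly (illustrative only; the
# class is normally used internally). For a matrix whose zeros sit on the
# diagonal, no single line can cover two zeros, so the rows and columns
# returned below always add up to exactly three lines:
#
#   cover = CoverZeros(np.array([[0, 1, 1],
#                                [1, 0, 1],
#                                [1, 1, 0]]))
#   lines = len(cover.get_covered_rows()) + len(cover.get_covered_columns())
#   assert lines == 3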
if __name__ == '__main__':
profit_matrix = [
[62, 75, 80, 93, 95, 97],
[75, 80, 82, 85, 71, 97],
[80, 75, 81, 98, 90, 97],
[78, 82, 84, 80, 50, 98],
[90, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 96]]
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
hungarian.calculate()
print("Expected value:\t\t543")
print("Calculated value:\t", hungarian.get_total_potential()) # = 543
print("Expected results:\n\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
cost_matrix = [
[4, 2, 8],
[4, 3, 7],
[3, 1, 6]]
hungarian = Hungarian(cost_matrix)
print('calculating...')
hungarian.calculate()
print("Expected value:\t\t12")
print("Calculated value:\t", hungarian.get_total_potential()) # = 12
print("Expected results:\n\t[(0, 1), (1, 0), (2, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
profit_matrix = [
[62, 75, 80, 93, 0, 97],
[75, 0, 82, 85, 71, 97],
[80, 75, 81, 0, 90, 97],
[78, 82, 0, 80, 50, 98],
[0, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 0]]
hungarian = Hungarian()
hungarian.calculate(profit_matrix, is_profit_matrix=True)
print("Expected value:\t\t523")
print("Calculated value:\t", hungarian.get_total_potential()) # = 523
print("Expected results:\n\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
| apache-2.0 | -2,089,404,783,177,320,700 | 39.711297 | 120 | 0.593371 | false |
openai/baselines | baselines/gail/dataset/mujoco_dset.py | 1 | 4448 | '''
Data structure of the input .npz:
the data is save in python dictionary format with keys: 'acs', 'ep_rets', 'rews', 'obs'
the values of each item is a list storing the expert trajectory sequentially
a transition can be: (data['obs'][t], data['acs'][t], data['obs'][t+1]) and get reward data['rews'][t]
'''
from baselines import logger
import numpy as np
class Dset(object):
def __init__(self, inputs, labels, randomize):
self.inputs = inputs
self.labels = labels
assert len(self.inputs) == len(self.labels)
self.randomize = randomize
self.num_pairs = len(inputs)
self.init_pointer()
def init_pointer(self):
self.pointer = 0
if self.randomize:
idx = np.arange(self.num_pairs)
np.random.shuffle(idx)
self.inputs = self.inputs[idx, :]
self.labels = self.labels[idx, :]
def get_next_batch(self, batch_size):
# if batch_size is negative -> return all
if batch_size < 0:
return self.inputs, self.labels
if self.pointer + batch_size >= self.num_pairs:
self.init_pointer()
end = self.pointer + batch_size
inputs = self.inputs[self.pointer:end, :]
labels = self.labels[self.pointer:end, :]
self.pointer = end
return inputs, labels
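# A minimal sketch of using Dset on its own (the shapes below are assumptions
# chosen purely for illustration):
#
#   dset = Dset(np.random.randn(100, 11), np.random.randn(100, 3),
#               randomize=True)
#   obs, acs = dset.get_next_batch(32)          # obs: (32, 11), acs: (32, 3)
#   all_obs, all_acs = dset.get_next_batch(-1)  # negative size returns everything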
class Mujoco_Dset(object):
def __init__(self, expert_path, train_fraction=0.7, traj_limitation=-1, randomize=True):
traj_data = np.load(expert_path)
if traj_limitation < 0:
traj_limitation = len(traj_data['obs'])
obs = traj_data['obs'][:traj_limitation]
acs = traj_data['acs'][:traj_limitation]
# obs, acs: shape (N, L, ) + S where N = # episodes, L = episode length
# and S is the environment observation/action space.
# Flatten to (N * L, prod(S))
if len(obs.shape) > 2:
self.obs = np.reshape(obs, [-1, np.prod(obs.shape[2:])])
self.acs = np.reshape(acs, [-1, np.prod(acs.shape[2:])])
else:
self.obs = np.vstack(obs)
self.acs = np.vstack(acs)
self.rets = traj_data['ep_rets'][:traj_limitation]
self.avg_ret = sum(self.rets)/len(self.rets)
self.std_ret = np.std(np.array(self.rets))
if len(self.acs) > 2:
self.acs = np.squeeze(self.acs)
assert len(self.obs) == len(self.acs)
self.num_traj = min(traj_limitation, len(traj_data['obs']))
self.num_transition = len(self.obs)
self.randomize = randomize
self.dset = Dset(self.obs, self.acs, self.randomize)
# for behavior cloning
self.train_set = Dset(self.obs[:int(self.num_transition*train_fraction), :],
self.acs[:int(self.num_transition*train_fraction), :],
self.randomize)
self.val_set = Dset(self.obs[int(self.num_transition*train_fraction):, :],
self.acs[int(self.num_transition*train_fraction):, :],
self.randomize)
self.log_info()
def log_info(self):
logger.log("Total trajectories: %d" % self.num_traj)
logger.log("Total transitions: %d" % self.num_transition)
logger.log("Average returns: %f" % self.avg_ret)
logger.log("Std for returns: %f" % self.std_ret)
def get_next_batch(self, batch_size, split=None):
if split is None:
return self.dset.get_next_batch(batch_size)
elif split == 'train':
return self.train_set.get_next_batch(batch_size)
elif split == 'val':
return self.val_set.get_next_batch(batch_size)
else:
raise NotImplementedError
def plot(self):
import matplotlib.pyplot as plt
plt.hist(self.rets)
plt.savefig("histogram_rets.png")
plt.close()
def test(expert_path, traj_limitation, plot):
dset = Mujoco_Dset(expert_path, traj_limitation=traj_limitation)
if plot:
dset.plot()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--expert_path", type=str, default="../data/deterministic.trpo.Hopper.0.00.npz")
parser.add_argument("--traj_limitation", type=int, default=None)
parser.add_argument("--plot", type=bool, default=False)
args = parser.parse_args()
test(args.expert_path, args.traj_limitation, args.plot)
| mit | 4,034,818,253,619,395,000 | 38.017544 | 104 | 0.590603 | false |
rhelmer/socorro-lib | socorro/lib/context_tools.py | 10 | 1083 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from contextlib import contextmanager
from socorro.lib.util import FakeLogger
#--------------------------------------------------------------------------
@contextmanager
def temp_file_context(raw_dump_path, logger=None):
"""this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it."""
try:
yield raw_dump_path
finally:
if 'TEMPORARY' in raw_dump_path:
try:
os.unlink(raw_dump_path)
except OSError:
if logger is None:
logger = FakeLogger()
logger.warning(
'unable to delete %s. manual deletion is required.',
raw_dump_path,
exc_info=True
)
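# Usage sketch (the paths and process() below are hypothetical):
#
#   with temp_file_context('/tmp/TEMPORARY.raw.dump') as path:
#       process(path)   # the file is unlinked when the block exits
#
#   with temp_file_context('/data/raw_crash.dump') as path:
#       process(path)   # no 'TEMPORARY' in the name, so the file is kept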
| mpl-2.0 | -1,485,149,046,700,728,600 | 35.1 | 75 | 0.566944 | false |
gbbr/superdesk | server/app.py | 8 | 1307 | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import settings
from superdesk.factory import get_app as superdesk_app
if os.environ.get('NEW_RELIC_LICENSE_KEY'):
try:
import newrelic.agent
newrelic.agent.initialize(os.path.abspath(os.path.join(os.path.dirname(__file__), 'newrelic.ini')))
except ImportError:
pass
def get_app(config=None):
"""App factory.
:param config: configuration that can override config from `settings.py`
:return: a new SuperdeskEve app instance
"""
if config is None:
config = {}
config['APP_ABSPATH'] = os.path.abspath(os.path.dirname(__file__))
for key in dir(settings):
if key.isupper():
config.setdefault(key, getattr(settings, key))
app = superdesk_app(config)
return app
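# Config overrides are plain dict entries that shadow same-named values from
# settings.py; the key below is only an illustrative assumption:
#
#   app = get_app({'DEBUG': True})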
if __name__ == '__main__':
debug = True
host = '0.0.0.0'
port = int(os.environ.get('PORT', '5000'))
app = get_app()
app.run(host=host, port=port, debug=debug, use_reloader=debug)
| agpl-3.0 | -7,921,608,483,047,963,000 | 25.14 | 107 | 0.65723 | false |
murali-munna/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
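# Usage sketch (X and y are assumed to be an existing dataset):
#
#   from sklearn.svm import SVC
#   sizes, train_scores, test_scores = learning_curve(
#       SVC(kernel="linear"), X, y,
#       train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#   # sizes has shape (n_unique_ticks,); both score arrays have shape
#   # (n_unique_ticks, n_cv_folds).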
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
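# Usage sketch (X and y assumed to exist; the parameter name and range are
# illustrative):
#
#   from sklearn.svm import SVC
#   train_scores, test_scores = validation_curve(
#       SVC(), X, y, param_name="gamma",
#       param_range=np.logspace(-6, -1, 5), cv=5)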
| bsd-3-clause | -2,920,845,170,559,162,400 | 41.752381 | 79 | 0.639266 | false |
saisai/phantomjs | src/qt/qtbase/util/local_database/enumdata.py | 102 | 38654 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
# language_list and country_list reflect the current values of enums in qlocale.h
# If new xml language files are available in CLDR, these languages and countries
# need to be *appended* to this list.
language_list = {
0 : [ "AnyLanguage", " " ],
1 : [ "C", " " ],
2 : [ "Abkhazian", "ab" ],
3 : [ "Oromo", "om" ], # macrolanguage
4 : [ "Afar", "aa" ],
5 : [ "Afrikaans", "af" ],
6 : [ "Albanian", "sq" ], # macrolanguage
7 : [ "Amharic", "am" ],
8 : [ "Arabic", "ar" ], # macrolanguage
9 : [ "Armenian", "hy" ],
10 : [ "Assamese", "as" ],
11 : [ "Aymara", "ay" ], # macrolanguage
12 : [ "Azerbaijani", "az" ], # macrolanguage
13 : [ "Bashkir", "ba" ],
14 : [ "Basque", "eu" ],
15 : [ "Bengali", "bn" ],
16 : [ "Dzongkha", "dz" ],
17 : [ "Bihari", "bh" ],
18 : [ "Bislama", "bi" ],
19 : [ "Breton", "br" ],
20 : [ "Bulgarian", "bg" ],
21 : [ "Burmese", "my" ],
22 : [ "Belarusian", "be" ],
23 : [ "Khmer", "km" ],
24 : [ "Catalan", "ca" ],
25 : [ "Chinese", "zh" ], # macrolanguage
26 : [ "Corsican", "co" ],
27 : [ "Croatian", "hr" ],
28 : [ "Czech", "cs" ],
29 : [ "Danish", "da" ],
30 : [ "Dutch", "nl" ],
31 : [ "English", "en" ],
32 : [ "Esperanto", "eo" ],
33 : [ "Estonian", "et" ], # macrolanguage
34 : [ "Faroese", "fo" ],
35 : [ "Fijian", "fj" ],
36 : [ "Finnish", "fi" ],
37 : [ "French", "fr" ],
38 : [ "Western Frisian", "fy" ],
39 : [ "Gaelic", "gd" ],
40 : [ "Galician", "gl" ],
41 : [ "Georgian", "ka" ],
42 : [ "German", "de" ],
43 : [ "Greek", "el" ],
44 : [ "Greenlandic", "kl" ],
45 : [ "Guarani", "gn" ], # macrolanguage
46 : [ "Gujarati", "gu" ],
47 : [ "Hausa", "ha" ],
48 : [ "Hebrew", "he" ],
49 : [ "Hindi", "hi" ],
50 : [ "Hungarian", "hu" ],
51 : [ "Icelandic", "is" ],
52 : [ "Indonesian", "id" ],
53 : [ "Interlingua", "ia" ],
54 : [ "Interlingue", "ie" ],
55 : [ "Inuktitut", "iu" ], # macrolanguage
56 : [ "Inupiak", "ik" ], # macrolanguage
57 : [ "Irish", "ga" ],
58 : [ "Italian", "it" ],
59 : [ "Japanese", "ja" ],
60 : [ "Javanese", "jv" ],
61 : [ "Kannada", "kn" ],
62 : [ "Kashmiri", "ks" ],
63 : [ "Kazakh", "kk" ],
64 : [ "Kinyarwanda", "rw" ],
65 : [ "Kirghiz", "ky" ],
66 : [ "Korean", "ko" ],
67 : [ "Kurdish", "ku" ], # macrolanguage
68 : [ "Rundi", "rn" ],
69 : [ "Lao", "lo" ],
70 : [ "Latin", "la" ],
71 : [ "Latvian", "lv" ], # macrolanguage
72 : [ "Lingala", "ln" ],
73 : [ "Lithuanian", "lt" ],
74 : [ "Macedonian", "mk" ],
75 : [ "Malagasy", "mg" ], # macrolanguage
76 : [ "Malay", "ms" ], # macrolanguage
77 : [ "Malayalam", "ml" ],
78 : [ "Maltese", "mt" ],
79 : [ "Maori", "mi" ],
80 : [ "Marathi", "mr" ],
81 : [ "Marshallese", "mh" ],
82 : [ "Mongolian", "mn" ], # macrolanguage
83 : [ "Nauru", "na" ],
84 : [ "Nepali", "ne" ], # macrolanguage
85 : [ "NorwegianBokmal", "nb" ],
86 : [ "Occitan", "oc" ],
87 : [ "Oriya", "or" ], # macrolanguage
88 : [ "Pashto", "ps" ], # macrolanguage
89 : [ "Persian", "fa" ], # macrolanguage
90 : [ "Polish", "pl" ],
91 : [ "Portuguese", "pt" ],
92 : [ "Punjabi", "pa" ],
93 : [ "Quechua", "qu" ], # macrolanguage
94 : [ "Romansh", "rm" ],
95 : [ "Romanian", "ro" ],
96 : [ "Russian", "ru" ],
97 : [ "Samoan", "sm" ],
98 : [ "Sango", "sg" ],
99 : [ "Sanskrit", "sa" ],
100 : [ "Serbian", "sr" ],
101 : [ "Ossetic", "os" ],
102 : [ "Southern Sotho", "st" ],
103 : [ "Tswana", "tn" ],
104 : [ "Shona", "sn" ],
105 : [ "Sindhi", "sd" ],
106 : [ "Sinhala", "si" ],
107 : [ "Swati", "ss" ],
108 : [ "Slovak", "sk" ],
109 : [ "Slovenian", "sl" ],
110 : [ "Somali", "so" ],
111 : [ "Spanish", "es" ],
112 : [ "Sundanese", "su" ],
113 : [ "Swahili", "sw" ], # macrolanguage
114 : [ "Swedish", "sv" ],
115 : [ "Sardinian", "sc" ], # macrolanguage
116 : [ "Tajik", "tg" ],
117 : [ "Tamil", "ta" ],
118 : [ "Tatar", "tt" ],
119 : [ "Telugu", "te" ],
120 : [ "Thai", "th" ],
121 : [ "Tibetan", "bo" ],
122 : [ "Tigrinya", "ti" ],
123 : [ "Tongan", "to" ],
124 : [ "Tsonga", "ts" ],
125 : [ "Turkish", "tr" ],
126 : [ "Turkmen", "tk" ],
127 : [ "Tahitian", "ty" ],
128 : [ "Uighur", "ug" ],
129 : [ "Ukrainian", "uk" ],
130 : [ "Urdu", "ur" ],
131 : [ "Uzbek", "uz" ], # macrolanguage
132 : [ "Vietnamese", "vi" ],
133 : [ "Volapuk", "vo" ],
134 : [ "Welsh", "cy" ],
135 : [ "Wolof", "wo" ],
136 : [ "Xhosa", "xh" ],
137 : [ "Yiddish", "yi" ], # macrolanguage
138 : [ "Yoruba", "yo" ],
139 : [ "Zhuang", "za" ], # macrolanguage
140 : [ "Zulu", "zu" ],
141 : [ "NorwegianNynorsk", "nn" ],
142 : [ "Bosnian", "bs" ],
143 : [ "Divehi", "dv" ],
144 : [ "Manx", "gv" ],
145 : [ "Cornish", "kw" ],
146 : [ "Akan", "ak" ], # macrolanguage
147 : [ "Konkani", "kok" ],
148 : [ "Ga", "gaa" ],
149 : [ "Igbo", "ig" ],
150 : [ "Kamba", "kam" ],
151 : [ "Syriac", "syr" ],
152 : [ "Blin", "byn" ],
153 : [ "Geez", "gez" ],
154 : [ "Koro", "kfo" ],
155 : [ "Sidamo", "sid" ],
156 : [ "Atsam", "cch" ],
157 : [ "Tigre", "tig" ],
158 : [ "Jju", "kaj" ],
159 : [ "Friulian", "fur" ],
160 : [ "Venda", "ve" ],
161 : [ "Ewe", "ee" ],
162 : [ "Walamo", "wal" ],
163 : [ "Hawaiian", "haw" ],
164 : [ "Tyap", "kcg" ],
165 : [ "Nyanja", "ny" ],
166 : [ "Filipino", "fil" ],
167 : [ "Swiss German", "gsw" ],
168 : [ "Sichuan Yi", "ii" ],
169 : [ "Kpelle", "kpe" ],
170 : [ "Low German", "nds" ],
171 : [ "South Ndebele", "nr" ],
172 : [ "Northern Sotho", "nso" ],
173 : [ "Northern Sami", "se" ],
174 : [ "Taroko", "trv" ],
175 : [ "Gusii", "guz" ],
176 : [ "Taita", "dav" ],
177 : [ "Fulah", "ff" ], # macrolanguage
178 : [ "Kikuyu", "ki" ],
179 : [ "Samburu", "saq" ],
180 : [ "Sena", "seh" ],
181 : [ "North Ndebele", "nd" ],
182 : [ "Rombo", "rof" ],
183 : [ "Tachelhit", "shi" ],
184 : [ "Kabyle", "kab" ],
185 : [ "Nyankole", "nyn" ],
186 : [ "Bena", "bez" ],
187 : [ "Vunjo", "vun" ],
188 : [ "Bambara", "bm" ],
189 : [ "Embu", "ebu" ],
190 : [ "Cherokee", "chr" ],
191 : [ "Morisyen", "mfe" ],
192 : [ "Makonde", "kde" ],
193 : [ "Langi", "lag" ],
194 : [ "Ganda", "lg" ],
195 : [ "Bemba", "bem" ],
196 : [ "Kabuverdianu", "kea" ],
197 : [ "Meru", "mer" ],
198 : [ "Kalenjin", "kln" ],
199 : [ "Nama", "naq" ],
200 : [ "Machame", "jmc" ],
201 : [ "Colognian", "ksh" ],
202 : [ "Masai", "mas" ],
203 : [ "Soga", "xog" ],
204 : [ "Luyia", "luy" ],
205 : [ "Asu", "asa" ],
206 : [ "Teso", "teo" ],
207 : [ "Saho", "ssy" ],
208 : [ "Koyra Chiini", "khq" ],
209 : [ "Rwa", "rwk" ],
210 : [ "Luo", "luo" ],
211 : [ "Chiga", "cgg" ],
212 : [ "Central Morocco Tamazight", "tzm" ],
213 : [ "Koyraboro Senni", "ses" ],
214 : [ "Shambala", "ksb" ],
215 : [ "Bodo", "brx" ],
216 : [ "Avaric", "av" ],
217 : [ "Chamorro", "ch" ],
218 : [ "Chechen", "ce" ],
219 : [ "Church", "cu" ], # macrolanguage
220 : [ "Chuvash", "cv" ],
221 : [ "Cree", "cr" ], # macrolanguage
222 : [ "Haitian", "ht" ],
223 : [ "Herero", "hz" ],
224 : [ "Hiri Motu", "ho" ],
225 : [ "Kanuri", "kr" ], # macrolanguage
226 : [ "Komi", "kv" ], # macrolanguage
227 : [ "Kongo", "kg" ], # macrolanguage
228 : [ "Kwanyama", "kj" ],
229 : [ "Limburgish", "li" ],
230 : [ "LubaKatanga", "lu" ],
231 : [ "Luxembourgish", "lb" ],
232 : [ "Navaho", "nv" ],
233 : [ "Ndonga", "ng" ],
234 : [ "Ojibwa", "oj" ], # macrolanguage
235 : [ "Pali", "pi" ], # macrolanguage
236 : [ "Walloon", "wa" ],
237 : [ "Aghem", "agq" ],
238 : [ "Basaa", "bas" ],
239 : [ "Zarma", "dje" ],
240 : [ "Duala", "dua" ],
241 : [ "JolaFonyi", "dyo" ],
242 : [ "Ewondo", "ewo" ],
243 : [ "Bafia", "ksf" ],
244 : [ "MakhuwaMeetto", "mgh" ],
245 : [ "Mundang", "mua" ],
246 : [ "Kwasio", "nmg" ],
247 : [ "Nuer", "nus" ],
248 : [ "Sakha", "sah" ],
249 : [ "Sangu", "sbp" ],
250 : [ "Congo Swahili", "swc" ],
251 : [ "Tasawaq", "twq" ],
252 : [ "Vai", "vai" ],
253 : [ "Walser", "wae" ],
254 : [ "Yangben", "yav" ],
255 : [ "Avestan", "ae" ],
256 : [ "Asturian", "ast" ],
257 : [ "Ngomba", "jgo" ],
258 : [ "Kako", "kkj" ],
259 : [ "Meta", "mgo" ],
260 : [ "Ngiemboon", "nnh" ],
261 : [ "Aragonese", "an" ],
262 : [ "Akkadian", "akk" ],
263 : [ "AncientEgyptian", "egy" ],
264 : [ "AncientGreek", "grc" ],
265 : [ "Aramaic", "arc" ],
266 : [ "Balinese", "ban" ],
267 : [ "Bamun", "bax" ],
268 : [ "BatakToba", "bbc" ],
269 : [ "Buginese", "bug" ],
270 : [ "Buhid", "bku" ],
271 : [ "Carian", "xcr" ],
272 : [ "Chakma", "ccp" ],
273 : [ "ClassicalMandaic", "myz" ],
274 : [ "Coptic", "cop" ],
275 : [ "Dogri", "doi" ], # macrolanguage
276 : [ "EasternCham", "cjm" ],
277 : [ "EasternKayah", "eky" ],
278 : [ "Etruscan", "ett" ],
279 : [ "Gothic", "got" ],
280 : [ "Hanunoo", "hnn" ],
281 : [ "Ingush", "inh" ],
282 : [ "LargeFloweryMiao", "hmd" ],
283 : [ "Lepcha", "lep" ],
284 : [ "Limbu", "lif" ],
285 : [ "Lisu", "lis" ],
286 : [ "Lu", "khb" ],
287 : [ "Lycian", "xlc" ],
288 : [ "Lydian", "xld" ],
289 : [ "Mandingo", "man" ], # macrolanguage
290 : [ "Manipuri", "mni" ],
291 : [ "Meroitic", "xmr" ],
292 : [ "NorthernThai", "nod" ],
293 : [ "OldIrish", "sga" ],
294 : [ "OldNorse", "non" ],
295 : [ "OldPersian", "peo" ],
296 : [ "OldTurkish", "otk" ],
297 : [ "Pahlavi", "pal" ],
298 : [ "Parthian", "xpr" ],
299 : [ "Phoenician", "phn" ],
300 : [ "PrakritLanguage", "pra" ],
301 : [ "Rejang", "rej" ],
302 : [ "Sabaean", "xsa" ],
303 : [ "Samaritan", "smp" ],
304 : [ "Santali", "sat" ],
305 : [ "Saurashtra", "saz" ],
306 : [ "Sora", "srb" ],
307 : [ "Sylheti", "syl" ],
308 : [ "Tagbanwa", "tbw" ],
309 : [ "TaiDam", "blt" ],
310 : [ "TaiNua", "tdd" ],
311 : [ "Ugaritic", "uga" ],
312 : [ "Akoose", "bss" ],
313 : [ "Lakota", "lkt" ],
314 : [ "Standard Moroccan Tamazight", "zgh" ]
}
country_list = {
0 : [ "AnyCountry", "ZZ" ],
1 : [ "Afghanistan", "AF" ],
2 : [ "Albania", "AL" ],
3 : [ "Algeria", "DZ" ],
4 : [ "AmericanSamoa", "AS" ],
5 : [ "Andorra", "AD" ],
6 : [ "Angola", "AO" ],
7 : [ "Anguilla", "AI" ],
8 : [ "Antarctica", "AQ" ],
9 : [ "AntiguaAndBarbuda", "AG" ],
10 : [ "Argentina", "AR" ],
11 : [ "Armenia", "AM" ],
12 : [ "Aruba", "AW" ],
13 : [ "Australia", "AU" ],
14 : [ "Austria", "AT" ],
15 : [ "Azerbaijan", "AZ" ],
16 : [ "Bahamas", "BS" ],
17 : [ "Bahrain", "BH" ],
18 : [ "Bangladesh", "BD" ],
19 : [ "Barbados", "BB" ],
20 : [ "Belarus", "BY" ],
21 : [ "Belgium", "BE" ],
22 : [ "Belize", "BZ" ],
23 : [ "Benin", "BJ" ],
24 : [ "Bermuda", "BM" ],
25 : [ "Bhutan", "BT" ],
26 : [ "Bolivia", "BO" ],
27 : [ "BosniaAndHerzegowina", "BA" ],
28 : [ "Botswana", "BW" ],
29 : [ "BouvetIsland", "BV" ],
30 : [ "Brazil", "BR" ],
31 : [ "BritishIndianOceanTerritory", "IO" ],
32 : [ "Brunei", "BN" ],
33 : [ "Bulgaria", "BG" ],
34 : [ "BurkinaFaso", "BF" ],
35 : [ "Burundi", "BI" ],
36 : [ "Cambodia", "KH" ],
37 : [ "Cameroon", "CM" ],
38 : [ "Canada", "CA" ],
39 : [ "CapeVerde", "CV" ],
40 : [ "CaymanIslands", "KY" ],
41 : [ "CentralAfricanRepublic", "CF" ],
42 : [ "Chad", "TD" ],
43 : [ "Chile", "CL" ],
44 : [ "China", "CN" ],
45 : [ "ChristmasIsland", "CX" ],
46 : [ "CocosIslands", "CC" ],
47 : [ "Colombia", "CO" ],
48 : [ "Comoros", "KM" ],
49 : [ "CongoKinshasa", "CD" ],
50 : [ "CongoBrazzaville", "CG" ],
51 : [ "CookIslands", "CK" ],
52 : [ "CostaRica", "CR" ],
53 : [ "IvoryCoast", "CI" ],
54 : [ "Croatia", "HR" ],
55 : [ "Cuba", "CU" ],
56 : [ "Cyprus", "CY" ],
57 : [ "CzechRepublic", "CZ" ],
58 : [ "Denmark", "DK" ],
59 : [ "Djibouti", "DJ" ],
60 : [ "Dominica", "DM" ],
61 : [ "DominicanRepublic", "DO" ],
62 : [ "EastTimor", "TL" ],
63 : [ "Ecuador", "EC" ],
64 : [ "Egypt", "EG" ],
65 : [ "ElSalvador", "SV" ],
66 : [ "EquatorialGuinea", "GQ" ],
67 : [ "Eritrea", "ER" ],
68 : [ "Estonia", "EE" ],
69 : [ "Ethiopia", "ET" ],
70 : [ "FalklandIslands", "FK" ],
71 : [ "FaroeIslands", "FO" ],
72 : [ "Fiji", "FJ" ],
73 : [ "Finland", "FI" ],
74 : [ "France", "FR" ],
75 : [ "Guernsey", "GG" ],
76 : [ "FrenchGuiana", "GF" ],
77 : [ "FrenchPolynesia", "PF" ],
78 : [ "FrenchSouthernTerritories", "TF" ],
79 : [ "Gabon", "GA" ],
80 : [ "Gambia", "GM" ],
81 : [ "Georgia", "GE" ],
82 : [ "Germany", "DE" ],
83 : [ "Ghana", "GH" ],
84 : [ "Gibraltar", "GI" ],
85 : [ "Greece", "GR" ],
86 : [ "Greenland", "GL" ],
87 : [ "Grenada", "GD" ],
88 : [ "Guadeloupe", "GP" ],
89 : [ "Guam", "GU" ],
90 : [ "Guatemala", "GT" ],
91 : [ "Guinea", "GN" ],
92 : [ "GuineaBissau", "GW" ],
93 : [ "Guyana", "GY" ],
94 : [ "Haiti", "HT" ],
95 : [ "HeardAndMcDonaldIslands", "HM" ],
96 : [ "Honduras", "HN" ],
97 : [ "HongKong", "HK" ],
98 : [ "Hungary", "HU" ],
99 : [ "Iceland", "IS" ],
100 : [ "India", "IN" ],
101 : [ "Indonesia", "ID" ],
102 : [ "Iran", "IR" ],
103 : [ "Iraq", "IQ" ],
104 : [ "Ireland", "IE" ],
105 : [ "Israel", "IL" ],
106 : [ "Italy", "IT" ],
107 : [ "Jamaica", "JM" ],
108 : [ "Japan", "JP" ],
109 : [ "Jordan", "JO" ],
110 : [ "Kazakhstan", "KZ" ],
111 : [ "Kenya", "KE" ],
112 : [ "Kiribati", "KI" ],
113 : [ "NorthKorea", "KP" ],
114 : [ "SouthKorea", "KR" ],
115 : [ "Kuwait", "KW" ],
116 : [ "Kyrgyzstan", "KG" ],
117 : [ "Laos", "LA" ],
118 : [ "Latvia", "LV" ],
119 : [ "Lebanon", "LB" ],
120 : [ "Lesotho", "LS" ],
121 : [ "Liberia", "LR" ],
122 : [ "Libya", "LY" ],
123 : [ "Liechtenstein", "LI" ],
124 : [ "Lithuania", "LT" ],
125 : [ "Luxembourg", "LU" ],
126 : [ "Macau", "MO" ],
127 : [ "Macedonia", "MK" ],
128 : [ "Madagascar", "MG" ],
129 : [ "Malawi", "MW" ],
130 : [ "Malaysia", "MY" ],
131 : [ "Maldives", "MV" ],
132 : [ "Mali", "ML" ],
133 : [ "Malta", "MT" ],
134 : [ "MarshallIslands", "MH" ],
135 : [ "Martinique", "MQ" ],
136 : [ "Mauritania", "MR" ],
137 : [ "Mauritius", "MU" ],
138 : [ "Mayotte", "YT" ],
139 : [ "Mexico", "MX" ],
140 : [ "Micronesia", "FM" ],
141 : [ "Moldova", "MD" ],
142 : [ "Monaco", "MC" ],
143 : [ "Mongolia", "MN" ],
144 : [ "Montserrat", "MS" ],
145 : [ "Morocco", "MA" ],
146 : [ "Mozambique", "MZ" ],
147 : [ "Myanmar", "MM" ],
148 : [ "Namibia", "NA" ],
149 : [ "Nauru", "NR" ],
150 : [ "Nepal", "NP" ],
151 : [ "Netherlands", "NL" ],
152 : [ "CuraSao", "CW" ],
153 : [ "NewCaledonia", "NC" ],
154 : [ "NewZealand", "NZ" ],
155 : [ "Nicaragua", "NI" ],
156 : [ "Niger", "NE" ],
157 : [ "Nigeria", "NG" ],
158 : [ "Niue", "NU" ],
159 : [ "NorfolkIsland", "NF" ],
160 : [ "NorthernMarianaIslands", "MP" ],
161 : [ "Norway", "NO" ],
162 : [ "Oman", "OM" ],
163 : [ "Pakistan", "PK" ],
164 : [ "Palau", "PW" ],
165 : [ "PalestinianTerritories", "PS" ],
166 : [ "Panama", "PA" ],
167 : [ "PapuaNewGuinea", "PG" ],
168 : [ "Paraguay", "PY" ],
169 : [ "Peru", "PE" ],
170 : [ "Philippines", "PH" ],
171 : [ "Pitcairn", "PN" ],
172 : [ "Poland", "PL" ],
173 : [ "Portugal", "PT" ],
174 : [ "PuertoRico", "PR" ],
175 : [ "Qatar", "QA" ],
176 : [ "Reunion", "RE" ],
177 : [ "Romania", "RO" ],
178 : [ "Russia", "RU" ],
179 : [ "Rwanda", "RW" ],
180 : [ "SaintKittsAndNevis", "KN" ],
181 : [ "SaintLucia", "LC" ],
182 : [ "SaintVincentAndTheGrenadines", "VC" ],
183 : [ "Samoa", "WS" ],
184 : [ "SanMarino", "SM" ],
185 : [ "SaoTomeAndPrincipe", "ST" ],
186 : [ "SaudiArabia", "SA" ],
187 : [ "Senegal", "SN" ],
188 : [ "Seychelles", "SC" ],
189 : [ "SierraLeone", "SL" ],
190 : [ "Singapore", "SG" ],
191 : [ "Slovakia", "SK" ],
192 : [ "Slovenia", "SI" ],
193 : [ "SolomonIslands", "SB" ],
194 : [ "Somalia", "SO" ],
195 : [ "SouthAfrica", "ZA" ],
196 : [ "SouthGeorgiaAndTheSouthSandwichIslands", "GS" ],
197 : [ "Spain", "ES" ],
198 : [ "SriLanka", "LK" ],
199 : [ "SaintHelena", "SH" ],
200 : [ "SaintPierreAndMiquelon", "PM" ],
201 : [ "Sudan", "SD" ],
202 : [ "Suriname", "SR" ],
203 : [ "SvalbardAndJanMayenIslands", "SJ" ],
204 : [ "Swaziland", "SZ" ],
205 : [ "Sweden", "SE" ],
206 : [ "Switzerland", "CH" ],
207 : [ "Syria", "SY" ],
208 : [ "Taiwan", "TW" ],
209 : [ "Tajikistan", "TJ" ],
210 : [ "Tanzania", "TZ" ],
211 : [ "Thailand", "TH" ],
212 : [ "Togo", "TG" ],
213 : [ "Tokelau", "TK" ],
214 : [ "Tonga", "TO" ],
215 : [ "TrinidadAndTobago", "TT" ],
216 : [ "Tunisia", "TN" ],
217 : [ "Turkey", "TR" ],
218 : [ "Turkmenistan", "TM" ],
219 : [ "TurksAndCaicosIslands", "TC" ],
220 : [ "Tuvalu", "TV" ],
221 : [ "Uganda", "UG" ],
222 : [ "Ukraine", "UA" ],
223 : [ "UnitedArabEmirates", "AE" ],
224 : [ "UnitedKingdom", "GB" ],
225 : [ "UnitedStates", "US" ],
226 : [ "UnitedStatesMinorOutlyingIslands", "UM" ],
227 : [ "Uruguay", "UY" ],
228 : [ "Uzbekistan", "UZ" ],
229 : [ "Vanuatu", "VU" ],
230 : [ "VaticanCityState", "VA" ],
231 : [ "Venezuela", "VE" ],
232 : [ "Vietnam", "VN" ],
233 : [ "BritishVirginIslands", "VG" ],
234 : [ "UnitedStatesVirginIslands", "VI" ],
235 : [ "WallisAndFutunaIslands", "WF" ],
236 : [ "WesternSahara", "EH" ],
237 : [ "Yemen", "YE" ],
238 : [ "CanaryIslands", "IC" ],
239 : [ "Zambia", "ZM" ],
240 : [ "Zimbabwe", "ZW" ],
241 : [ "ClippertonIsland", "CP" ],
242 : [ "Montenegro", "ME" ],
243 : [ "Serbia", "RS" ],
244 : [ "Saint Barthelemy", "BL" ],
245 : [ "Saint Martin", "MF" ],
246 : [ "LatinAmericaAndTheCaribbean", "419" ],
247 : [ "AscensionIsland", "AC" ],
248 : [ "AlandIslands", "AX" ],
249 : [ "DiegoGarcia", "DG" ],
250 : [ "CeutaAndMelilla", "EA" ],
251 : [ "IsleOfMan", "IM" ],
252 : [ "Jersey", "JE" ],
253 : [ "TristanDaCunha", "TA" ],
254 : [ "SouthSudan", "SS" ],
255 : [ "Bonaire", "BQ" ],
256 : [ "SintMaarten", "SX" ],
257 : [ "Kosovo", "XK" ]
}
script_list = {
0 : [ "AnyScript", "Zzzz" ],
1 : [ "Arabic", "Arab" ],
2 : [ "Cyrillic", "Cyrl" ],
3 : [ "Deseret", "Dsrt" ],
4 : [ "Gurmukhi", "Guru" ],
5 : [ "Simplified Han", "Hans" ],
6 : [ "Traditional Han", "Hant" ],
7 : [ "Latin", "Latn" ],
8 : [ "Mongolian", "Mong" ],
9 : [ "Tifinagh", "Tfng" ],
10 : [ "Armenian", "Armn" ],
11 : [ "Bengali", "Beng" ],
12 : [ "Cherokee", "Cher" ],
13 : [ "Devanagari", "Deva" ],
14 : [ "Ethiopic", "Ethi" ],
15 : [ "Georgian", "Geor" ],
16 : [ "Greek", "Grek" ],
17 : [ "Gujarati", "Gujr" ],
18 : [ "Hebrew", "Hebr" ],
19 : [ "Japanese", "Jpan" ],
20 : [ "Khmer", "Khmr" ],
21 : [ "Kannada", "Knda" ],
22 : [ "Korean", "Kore" ],
23 : [ "Lao", "Laoo" ],
24 : [ "Malayalam", "Mlym" ],
25 : [ "Myanmar", "Mymr" ],
26 : [ "Oriya", "Orya" ],
27 : [ "Tamil", "Taml" ],
28 : [ "Telugu", "Telu" ],
29 : [ "Thaana", "Thaa" ],
30 : [ "Thai", "Thai" ],
31 : [ "Tibetan", "Tibt" ],
32 : [ "Sinhala", "Sinh" ],
33 : [ "Syriac", "Syrc" ],
34 : [ "Yi", "Yiii" ],
35 : [ "Vai", "Vaii" ],
36 : [ "Avestan", "Avst" ],
37 : [ "Balinese", "Bali" ],
38 : [ "Bamum", "Bamu" ],
39 : [ "Batak", "Batk" ],
40 : [ "Bopomofo", "Bopo" ],
41 : [ "Brahmi", "Brah" ],
42 : [ "Buginese", "Bugi" ],
43 : [ "Buhid", "Buhd" ],
44 : [ "CanadianAboriginal", "Cans" ],
45 : [ "Carian", "Cari" ],
46 : [ "Chakma", "Cakm" ],
47 : [ "Cham", "Cham" ],
48 : [ "Coptic", "Copt" ],
49 : [ "Cypriot", "Cprt" ],
50 : [ "Egyptian Hieroglyphs", "Egyp" ],
51 : [ "Fraser", "Lisu" ],
52 : [ "Glagolitic", "Glag" ],
53 : [ "Gothic", "Goth" ],
54 : [ "Han", "Hani" ],
55 : [ "Hangul", "Hang" ],
56 : [ "Hanunoo", "Hano" ],
57 : [ "Imperial Aramaic", "Armi" ],
58 : [ "Inscriptional Pahlavi", "Phli" ],
59 : [ "Inscriptional Parthian", "Prti" ],
60 : [ "Javanese", "Java" ],
61 : [ "Kaithi", "Kthi" ],
62 : [ "Katakana", "Kana" ],
63 : [ "Kayah Li", "Kali" ],
64 : [ "Kharoshthi", "Khar" ],
65 : [ "Lanna", "Lana" ],
66 : [ "Lepcha", "Lepc" ],
67 : [ "Limbu", "Limb" ],
68 : [ "Linear B", "Linb" ],
69 : [ "Lycian", "Lyci" ],
70 : [ "Lydian", "Lydi" ],
71 : [ "Mandaean", "Mand" ],
72 : [ "Meitei Mayek", "Mtei" ],
73 : [ "Meroitic", "Mero" ],
74 : [ "Meroitic Cursive", "Merc" ],
75 : [ "Nko", "Nkoo" ],
76 : [ "New Tai Lue", "Talu" ],
77 : [ "Ogham", "Ogam" ],
78 : [ "Ol Chiki", "Olck" ],
79 : [ "Old Italic", "Ital" ],
80 : [ "Old Persian", "Xpeo" ],
81 : [ "Old South Arabian", "Sarb" ],
82 : [ "Orkhon", "Orkh" ],
83 : [ "Osmanya", "Osma" ],
84 : [ "Phags Pa", "Phag" ],
85 : [ "Phoenician", "Phnx" ],
86 : [ "Pollard Phonetic", "Plrd" ],
87 : [ "Rejang", "Rjng" ],
88 : [ "Runic", "Runr" ],
89 : [ "Samaritan", "Samr" ],
90 : [ "Saurashtra", "Saur" ],
91 : [ "Sharada", "Shrd" ],
92 : [ "Shavian", "Shaw" ],
93 : [ "Sora Sompeng", "Sora" ],
94 : [ "Cuneiform", "Xsux" ],
95 : [ "Sundanese", "Sund" ],
96 : [ "Syloti Nagri", "Sylo" ],
97 : [ "Tagalog", "Tglg" ],
98 : [ "Tagbanwa", "Tagb" ],
99 : [ "Tai Le", "Tale" ],
100 : [ "Tai Viet", "Tavt" ],
101 : [ "Takri", "Takr" ],
102 : [ "Ugaritic", "Ugar" ],
103 : [ "Braille", "Brai" ],
104 : [ "Hiragana", "Hira" ]
# ### : [ "Blissymbols", "Blis" ],
# ### : [ "Linear A", "Lina" ],
# ### : [ "Naxi Geba", "Nkgb" ],
# ### : [ "Pahawh Hmong", "Hmng" ],
# ### : [ "Varang Kshiti", "Wara" ],
}
def countryCodeToId(code):
if not code:
return 0
for country_id in country_list:
if country_list[country_id][1] == code:
return country_id
return -1
def languageCodeToId(code):
if not code:
return 0
for language_id in language_list:
if language_list[language_id][1] == code:
return language_id
return -1
def scriptCodeToId(code):
if not code:
return 0
for script_id in script_list:
if script_list[script_id][1] == code:
return script_id
return -1
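# The three lookups above are linear scans per call. If they ever become a
# bottleneck, an inverted index built once would do; a sketch (not wired into
# the generator):
#
#   _country_by_code = dict((data[1], cid) for cid, data in country_list.items())
#   def countryCodeToIdFast(code):
#       if not code:
#           return 0
#       return _country_by_code.get(code, -1)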
| bsd-3-clause | -1,086,870,543,704,711,300 | 49.727034 | 81 | 0.294019 | false |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/pip/utils/glibc.py | 350 | 2939 | from __future__ import absolute_import
import re
import ctypes
import platform
import warnings
def glibc_version_string():
"Returns glibc version string, or None if not using glibc."
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# Separated out from have_compatible_glibc for easier unit testing
def check_glibc_version(version_str, required_major, minimum_minor):
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn("Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, RuntimeWarning)
return False
return (int(m.group("major")) == required_major and
int(m.group("minor")) >= minimum_minor)
def have_compatible_glibc(required_major, minimum_minor):
version_str = glibc_version_string()
if version_str is None:
return False
return check_glibc_version(version_str, required_major, minimum_minor)
# platform.libc_ver regularly returns completely nonsensical glibc
# versions. E.g. on my computer, platform says:
#
# ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.7')
# ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
# ('glibc', '2.9')
#
# But the truth is:
#
# ~$ ldd --version
# ldd (Debian GLIBC 2.22-11) 2.22
#
# This is unfortunate, because it means that the linehaul data on libc
# versions that was generated by pip 8.1.2 and earlier is useless and
# misleading. Solution: instead of using platform, use our code that actually
# works.
def libc_ver():
glibc_version = glibc_version_string()
if glibc_version is None:
# For non-glibc platforms, fall back on platform.libc_ver
return platform.libc_ver()
else:
return ("glibc", glibc_version)
| mit | -5,932,181,460,405,319,000 | 35.283951 | 78 | 0.677101 | false |
shubhangiKishore/pattern | examples/08-server/03-wiki/wiki.py | 3 | 4618 | from __future__ import print_function
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.server import App, template, threadsafe
from codecs import open
# This example demonstrates a simple wiki served by pattern.server.
# A wiki is a web app where each page can be edited (e.g, Wikipedia).
# We will store the contents of each page as a file in /data.
app = App(name="wiki")
# Our wiki app has a single URL handler listening at the root ("/").
# It takes any combination of positional and keyword arguments.
# This means that any URL will be routed to the index() function.
# For example, http://127.0.0.1:8080/pages/bio.html?edit calls index()
# with path=("pages", "bio.html") and data={"edit": ""}.
@app.route("/")
def index(*path, **data):
#print "path:", path
#print "data:", data
# Construct a file name in /data from the URL path.
# For example, path=("pages", "bio.html")
# is mapped to "/data/pages/bio.html.txt".
page = "/".join(path)
page = page if page else "index.html"
page = page.replace(" ", "-")
page = page + ".txt"
page = os.path.join(app.path, "data", page) # Absolute paths are safer.
#print "page:", page
# If the URL ends in "?save", update the page content.
if "save" in data and "content" in data:
return save(page, src=data["content"])
# If the URL ends in "?edit", show the page editor.
if "edit" in data:
return edit(page)
# If the page does not exist, show the page editor.
if not os.path.exists(page):
return edit(page)
# Show the page.
else:
return view(page)
# The pattern.server module has a simple template() function
# that takes a file path or a string and optional parameters.
# Placeholders in the template source (e.g., "$name")
# are replaced with the parameter values.
# Below is a template with placeholders for page name and content.
# The page content is loaded from a file stored in /data.
# The page name is parsed from the filename,
# e.g., "/data/index.html.txt" => "index.html".
wiki = """
<!doctype html>
<html>
<head>
<title>$name</title>
<meta charset="utf-8">
</head>
<body>
<h3>$name</h3>
$content
<br>
<a href="?edit">edit</a>
</body>
</html>
"""
# The name() function takes a file path (e.g., "/data/index.html.txt")
# and returns the page name ("index.html").
def name(page):
name = os.path.basename(page) # "/data/index.html.txt" => "index.html.txt"
name = os.path.splitext(name)[0] # ("index.html", ".txt") => "index.html"
return name
# We could also have a function for a *display* name (e.g., "Index").
# Something like:
def displayname(page):
return name(name(page)).replace("-", " ").title()
# The view() function is called when a page needs to be displayed.
# Our template has two placeholders: the page $name and $content.
# We load the $content from the contents of the given file path.
# We load the $name using the name() function above.
def view(page):
print(displayname(page))
return template(wiki, name=name(page), content=open(page).read())
# The edit() function is called when a URL ends in "?edit",
# e.g., http://127.0.0.1:8080/index.html?edit.
# In this case, we don't show the contents of "/data/index.html.txt" directly,
# but wrapped inside a <textarea> for editing instead.
# Once the user is done editing and clicks "Submit",
# the browser redirects to http://127.0.0.1:8080/index.html?save,
# posting the data inside the <textarea> to the server.
# We can catch it as the optional "content" parameter of the index() function
# (since the name of the <textarea> is "content").
def edit(page):
s = open(page).read() if os.path.exists(page) else ""
s = '<form method="post" action="?save">' \
'<textarea name="content" rows="10" cols="80">%s</textarea><br>' \
'<input type="submit">' \
'</form>' % s
return template(wiki, name=name(page), content=s)
# The save() function is called when edited content is posted to the server.
# It creates a file in /data and stores the content.
@threadsafe
def save(page, src):
f = open(page, "w")
f.write(src.encode("utf-8"))
f.close()
return view(page)
# Writing HTML by hand in the <textarea> becomes tedious after a while,
# so we could for example extend save() with a parser for Markdown syntax:
# http://en.wikipedia.org/wiki/Markdown,
# http://pythonhosted.org/Markdown/,
# or replace the <textarea> with a visual TinyMCE editor:
# http://www.tinymce.com.
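# A minimal sketch of the Markdown idea, assuming the third-party 'markdown'
# package; it is kept commented out so this example app has no extra
# dependency:
#
#   import markdown
#   def view_markdown(page):
#       html = markdown.markdown(open(page).read())
#       return template(wiki, name=name(page), content=html)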
app.run("127.0.0.1", port=8080) | bsd-3-clause | -2,578,841,253,475,727,000 | 34.259542 | 87 | 0.657644 | false |
kornicameister/ansible-modules-extras | network/openvswitch_port.py | 12 | 8270 | #!/usr/bin/python
#coding: utf-8 -*-
# pylint: disable=C0111
# (c) 2013, David Stygstra <[email protected]>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
tag:
version_added: 2.2
required: false
description:
- VLAN tag for this port
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: {}
description:
- Dictionary of external_ids applied to a port.
set:
version_added: 2.0
required: false
default: None
description:
- Set a single property on a port.
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port: bridge=br-ex port=eth2 state=present
# Creates port eth6 and set ofport equal to 6.
- openvswitch_port: bridge=bridge-loop port=eth6 state=present
set="Interface eth6 ofport_request=6"
# Creates port vlan10 with tag 10 on bridge br-ex
- openvswitch_port: bridge=br-ex port=vlan10 tag=10 state=present
set="Interface vlan10 type=internal"
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port: bridge=br-int port=vifeth6 state=present
args:
external_ids:
iface-id: "{{inventory_hostname}}-vifeth6"
attached-mac: "00:00:5E:00:53:23"
vm-id: "{{inventory_hostname}}"
iface-status: "active"
'''
# pylint: disable=W0703
def truncate_before(value, srch):
""" Return content of str before the srch parameters. """
before_index = value.find(srch)
if (before_index >= 0):
return value[:before_index]
else:
return value
def _set_to_get(set_cmd, module):
""" Convert set command to get command and set value.
return tuple (get command, set value)
"""
##
# If set has option: then we want to truncate just before that.
set_cmd = truncate_before(set_cmd, " option:")
get_cmd = set_cmd.split(" ")
(key, value) = get_cmd[-1].split("=")
module.log("get commands %s " % key)
return (["--", "get"] + get_cmd[:-1] + [key], value)
# pylint: disable=R0902
class OVSPort(object):
""" Interface to OVS port. """
def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
self.port = module.params['port']
self.tag = module.params['tag']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.set_opt = module.params.get('set', None)
def _vsctl(self, command, check_rc=True):
'''Run ovs-vsctl command'''
cmd = ['ovs-vsctl', '-t', str(self.timeout)] + command
return self.module.run_command(cmd, check_rc=check_rc)
def exists(self):
'''Check if the port already exists'''
(rtc, out, err) = self._vsctl(['list-ports', self.bridge])
if rtc != 0:
self.module.fail_json(msg=err)
return any(port.rstrip() == self.port for port in out.split('\n'))
def set(self, set_opt):
""" Set attributes on a port. """
self.module.log("set called %s" % set_opt)
if (not set_opt):
return False
(get_cmd, set_value) = _set_to_get(set_opt, self.module)
(rtc, out, err) = self._vsctl(get_cmd, False)
if rtc != 0:
##
# ovs-vsctl -t 5 -- get Interface port external_ids:key
# returns failure if key does not exist.
out = None
else:
out = out.strip("\n")
out = out.strip('"')
if (out == set_value):
return False
(rtc, out, err) = self._vsctl(["--", "set"] + set_opt.split(" "))
if rtc != 0:
self.module.fail_json(msg=err)
return True
def add(self):
'''Add the port'''
cmd = ['add-port', self.bridge, self.port]
if self.tag:
cmd += ["tag=" + self.tag]
        # self.set is a bound method and always truthy, so only self.set_opt
        # decides whether extra "-- set" options were supplied.
        if self.set_opt:
cmd += ["--", "set"]
cmd += self.set_opt.split(" ")
(rtc, _, err) = self._vsctl(cmd)
if rtc != 0:
self.module.fail_json(msg=err)
return True
def delete(self):
'''Remove the port'''
(rtc, _, err) = self._vsctl(['del-port', self.bridge, self.port])
if rtc != 0:
self.module.fail_json(msg=err)
def check(self):
'''Run check mode'''
try:
if self.state == 'absent' and self.exists():
changed = True
elif self.state == 'present' and not self.exists():
changed = True
else:
changed = False
        except Exception as earg:
self.module.fail_json(msg=str(earg))
self.module.exit_json(changed=changed)
def run(self):
'''Make the necessary changes'''
changed = False
try:
if self.state == 'absent':
if self.exists():
self.delete()
changed = True
elif self.state == 'present':
##
# Add any missing ports.
                if not self.exists():
self.add()
changed = True
##
                # If the "-- set" options changed, detect that here and
                # apply them; this only makes sense when state=present.
                if not changed:
changed = self.set(self.set_opt) or changed
items = self.module.params['external_ids'].items()
for (key, value) in items:
value = value.replace('"', '')
fmt_opt = "Interface %s external_ids:%s=%s"
external_id = fmt_opt % (self.port, key, value)
changed = self.set(external_id) or changed
##
except Exception, earg:
self.module.fail_json(msg=str(earg))
self.module.exit_json(changed=changed)
# pylint: disable=E0602
def main():
""" Entry point. """
module = AnsibleModule(
argument_spec={
'bridge': {'required': True},
'port': {'required': True},
'tag': {'required': False},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'set': {'required': False, 'default': None},
'external_ids': {'default': {}, 'required': False, 'type': 'dict'},
},
supports_check_mode=True,
)
port = OVSPort(module)
if module.check_mode:
port.check()
else:
port.run()
# pylint: disable=W0614
# pylint: disable=W0401
# pylint: disable=W0622
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -8,886,964,949,496,621,000 | 29.404412 | 79 | 0.557678 | false |
firebitsbr/infernal-twin | build/pillow/build/lib.linux-i686-2.7/PIL/BufrStubImagePlugin.py | 77 | 1504 | #
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile
_handler = None
##
# Install application-specific BUFR image handler.
#
# @param handler Handler object.
def register_handler(handler):
global _handler
_handler = handler
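# A hypothetical handler sketch (the class name and logic are illustrative;
# actual BUFR decoding is out of scope for this stub format):
#
#   class BufrHandler(object):
#       def open(self, im):
#           im.mode = "F"        # fill in mode/size from the BUFR header
#           im.size = 360, 180
#       def load(self, im):
#           pass                 # decode the data and update im here
#
#   register_handler(BufrHandler())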
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"
class BufrStubImageFile(ImageFile.StubImageFile):
format = "BUFR"
format_description = "BUFR"
def _open(self):
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
raise SyntaxError("Not a BUFR file")
self.fp.seek(offset)
# make something up
self.mode = "F"
self.size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
raise IOError("BUFR save handler not installed")
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)
Image.register_extension(BufrStubImageFile.format, ".bufr")
| gpl-3.0 | -6,408,433,156,340,313,000 | 19.888889 | 73 | 0.589761 | false |
wanderer2/pymc3 | pymc3/tests/test_glm.py | 1 | 3145 | import numpy as np
from .helpers import SeededTest
from pymc3 import glm, Model, Uniform, Normal, find_MAP, Slice, sample
# Generate data
def generate_data(intercept, slope, size=700):
x = np.linspace(-1, 1, size)
y = intercept + x * slope
return x, y
class TestGLM(SeededTest):
@classmethod
def setUpClass(cls):
super(TestGLM, cls).setUpClass()
cls.intercept = 1
cls.slope = 3
cls.sd = .05
x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000)
cls.y_linear += np.random.normal(size=1000, scale=cls.sd)
cls.data_linear = dict(x=x_linear, y=cls.y_linear)
x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000)
y_logistic = 1 / (1 + np.exp(-y_logistic))
bern_trials = [np.random.binomial(1, i) for i in y_logistic]
cls.data_logistic = dict(x=x_logistic, y=bern_trials)
def test_linear_component(self):
with Model() as model:
y_est, _ = glm.linear_component('y ~ x', self.data_linear)
sigma = Uniform('sigma', 0, 20)
Normal('y_obs', mu=y_est, sd=sigma, observed=self.y_linear)
start = find_MAP(vars=[sigma])
step = Slice(model.vars)
trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed)
self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
self.assertAlmostEqual(np.mean(trace['sigma']), self.sd, 1)
def test_glm(self):
with Model() as model:
glm.glm('y ~ x', self.data_linear)
step = Slice(model.vars)
trace = sample(500, step, progressbar=False, random_seed=self.random_seed)
self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
self.assertAlmostEqual(np.mean(trace['sd']), self.sd, 1)
def test_glm_link_func(self):
with Model() as model:
glm.glm('y ~ x', self.data_logistic,
family=glm.families.Binomial(link=glm.families.logit))
step = Slice(model.vars)
trace = sample(1000, step, progressbar=False, random_seed=self.random_seed)
self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
def test_more_than_one_glm_is_ok(self):
with Model():
glm.glm('y ~ x', self.data_logistic,
family=glm.families.Binomial(link=glm.families.logit),
name='glm1')
glm.glm('y ~ x', self.data_logistic,
family=glm.families.Binomial(link=glm.families.logit),
name='glm2')
def test_from_xy(self):
with Model():
glm.glm.from_xy(
self.data_logistic['x'],
self.data_logistic['y'],
family=glm.families.Binomial(link=glm.families.logit),
name='glm1')
| apache-2.0 | -5,631,591,739,067,999,000 | 39.320513 | 93 | 0.585374 | false |
bugobliterator/ardupilot-chibios | Tools/ardupilotwaf/px_mkfw.py | 18 | 4864 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2012, 2013 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# PX4 firmware image generator
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
import sys
import argparse
import json
import base64
import zlib
import time
import subprocess
#
# Construct a basic firmware description
#
def mkdesc():
proto = {}
proto['magic'] = "PX4FWv1"
proto['board_id'] = 0
proto['board_revision'] = 0
proto['version'] = ""
proto['summary'] = ""
proto['description'] = ""
proto['git_identity'] = ""
proto['build_time'] = 0
proto['image'] = bytes()
proto['image_size'] = 0
return proto
# Parse commandline
parser = argparse.ArgumentParser(description="Firmware generator for the PX4 autopilot system.")
parser.add_argument("--prototype", action="store", help="read a prototype description from a file")
parser.add_argument("--board_id", action="store", help="set the board ID required")
parser.add_argument("--board_revision", action="store", help="set the board revision required")
parser.add_argument("--version", action="store", help="set a version string")
parser.add_argument("--summary", action="store", help="set a brief description")
parser.add_argument("--description", action="store", help="set a longer description")
parser.add_argument("--git_identity", action="store", help="the working directory to check for git identity")
parser.add_argument("--parameter_xml", action="store", help="the parameters.xml file")
parser.add_argument("--airframe_xml", action="store", help="the airframes.xml file")
parser.add_argument("--image", action="store", help="the firmware image")
args = parser.parse_args()
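# Typical invocation (paths are illustrative); the JSON firmware
# description is written to stdout:
#
#   python px_mkfw.py --prototype boards/px4fmu-v2.prototype \
#       --git_identity . --image build/firmware.bin > firmware.px4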
# Fetch the firmware descriptor prototype if specified
if args.prototype is not None:
    f = open(args.prototype, "r")
desc = json.load(f)
f.close()
else:
desc = mkdesc()
desc['build_time'] = int(time.time())
if args.board_id is not None:
    desc['board_id'] = int(args.board_id)
if args.board_revision is not None:
    desc['board_revision'] = int(args.board_revision)
if args.version is not None:
    desc['version'] = str(args.version)
if args.summary is not None:
    desc['summary'] = str(args.summary)
if args.description is not None:
    desc['description'] = str(args.description)
if args.git_identity is not None:
cmd = " ".join(["git", "--git-dir", args.git_identity + "/.git", "describe", "--always", "--dirty"])
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
desc['git_identity'] = str(p.read().strip())
p.close()
if args.parameter_xml is not None:
    f = open(args.parameter_xml, "rb")
    bytes = f.read()
    f.close()
    desc['parameter_xml_size'] = len(bytes)
    desc['parameter_xml'] = base64.b64encode(zlib.compress(bytes, 9)).decode('utf-8')
    desc['mav_autopilot'] = 12  # 12 = MAV_AUTOPILOT_PX4
if args.airframe_xml is not None:
    f = open(args.airframe_xml, "rb")
    bytes = f.read()
    f.close()
    desc['airframe_xml_size'] = len(bytes)
    desc['airframe_xml'] = base64.b64encode(zlib.compress(bytes, 9)).decode('utf-8')
if args.image is not None:
    f = open(args.image, "rb")
    bytes = f.read()
    f.close()
    desc['image_size'] = len(bytes)
    desc['image'] = base64.b64encode(zlib.compress(bytes, 9)).decode('utf-8')
print(json.dumps(desc, indent=4))
| gpl-3.0 | -8,231,865,211,962,592,000 | 38.544715 | 109 | 0.700247 | false |
InakiZabala/odoomrp-wip | purchase_secondary_unit/models/pricelist.py | 21 | 2471 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class ProductPricelistItem(models.Model):
_inherit = 'product.pricelist.item'
@api.one
@api.depends('product_id', 'product_tmpl_id')
def _get_uop_id(self):
if self.product_id:
self.uop_id = self.product_id.uop_id
elif self.product_tmpl_id:
self.uop_id = self.product_tmpl_id.uop_id
else:
self.uop_id = False
price_surcharge_uop = fields.Float(
string='Price Surcharge for UoP',
digits=dp.get_precision('Product Price'),
        help='Specify the fixed amount to add or subtract (if negative) to'
' the amount calculated with the discount.')
uop_id = fields.Many2one(
comodel_name='product.uom', string='Unit of Purchase',
compute=_get_uop_id, readonly=True)
@api.onchange('price_surcharge')
def onchange_price_surcharge(self):
if self.product_id:
self.price_surcharge_uop = (
self.price_surcharge / self.product_id.uop_coeff)
elif self.product_tmpl_id:
self.price_surcharge_uop = (
self.price_surcharge / self.product_tmpl_id.uop_coeff)
@api.onchange('price_surcharge_uop')
def onchange_price_surcharge_uop(self):
if self.product_id:
self.price_surcharge = (
self.price_surcharge_uop * self.product_id.uop_coeff)
elif self.product_tmpl_id:
self.price_surcharge = (
self.price_surcharge_uop * self.product_tmpl_id.uop_coeff)
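    # Worked example of the two conversions above (figures are illustrative):
    # for a product purchased by the dozen, uop_coeff is 12, so a surcharge
    # of 1.20 per unit of measure corresponds to 1.20 / 12 = 0.10 per unit
    # of purchase, and editing the UoP figure multiplies it back by 12.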
| agpl-3.0 | 2,091,665,262,968,588,800 | 39.508197 | 78 | 0.609875 | false |
inspyration/odoo | doc/_themes/odoodoc/sphinx_monkeypatch.py | 24 | 3166 | # -*- coding: utf-8 -*-
import sphinx.roles
import sphinx.environment
from sphinx.writers.html import HTMLTranslator
from docutils.writers.html4css1 import HTMLTranslator as DocutilsTranslator
def patch():
# navify toctree (oh god)
@monkey(sphinx.environment.BuildEnvironment)
def resolve_toctree(old_resolve, self, *args, **kwargs):
""" If navbar, bootstrapify TOC to yield a navbar
"""
navbar = kwargs.pop('navbar', None)
toc = old_resolve(self, *args, **kwargs)
if toc is None:
return None
navbarify(toc[0], navbar=navbar)
return toc
# monkeypatch visit_table to remove border and add .table
HTMLTranslator.visit_table = visit_table
# disable colspec crap
HTMLTranslator.write_colspecs = lambda self: None
# copy data- attributes straight from source to dest
HTMLTranslator.starttag = starttag_data
def navbarify(node, navbar=None):
"""
:param node: toctree node to navbarify
:param navbar: Whether this toctree is a 'main' navbar, a 'side' navbar or
not a navbar at all
"""
if navbar == 'side':
for n in node.traverse():
if n.tagname == 'bullet_list':
n['classes'].append('nav')
elif navbar == 'main':
# add classes to just toplevel
node['classes'].extend(['nav', 'navbar-nav', 'navbar-right'])
for list_item in node.children:
# bullet_list
# list_item
# compact_paragraph
# reference
# bullet_list
# list_item
# compact_paragraph
# reference
# no bullet_list.list_item -> don't dropdownify
if not list_item.children[1].children:
return
list_item['classes'].append('dropdown')
# list_item.compact_paragraph.reference
link = list_item.children[0].children[0]
link['classes'].append('dropdown-toggle')
link.attributes['data-toggle'] = 'dropdown'
# list_item.bullet_list
list_item.children[1]['classes'].append('dropdown-menu')
def visit_table(self, node):
"""
* remove border
* add table class
"""
self._table_row_index = 0
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join({'table', self.settings.table_style}).strip()
self.body.append(self.starttag(node, 'table', CLASS=classes))
def starttag_data(self, node, tagname, suffix='\n', empty=False, **attributes):
attributes.update(
(k, v) for k, v in node.attributes.iteritems()
if k.startswith('data-')
)
# oh dear
return DocutilsTranslator.starttag(
self, node, tagname, suffix=suffix, empty=empty, **attributes)
class monkey(object):
def __init__(self, obj):
self.obj = obj
def __call__(self, fn):
name = fn.__name__
old = getattr(self.obj, name)
setattr(self.obj, name, lambda self_, *args, **kwargs: \
fn(old, self_, *args, **kwargs))
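# Usage sketch for the decorator above (the patched class and method are
# hypothetical): the wrapped function receives the original implementation
# as its first argument and is expected to delegate to it.
#
#   @monkey(SomeClass)
#   def some_method(old, self, *args, **kwargs):
#       # adjust args here, then delegate
#       return old(self, *args, **kwargs)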
| agpl-3.0 | -3,458,771,989,107,372,000 | 34.177778 | 79 | 0.581491 | false |
vprime/puuuu | env/lib/python2.7/site-packages/django/core/management/base.py | 104 | 15912 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.six import StringIO
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
if hasattr(out, 'isatty') and out.isatty():
self.style_func = style_func
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = [f for f in (style_func, self.style_func, lambda x:x)
if f is not None][0]
self._out.write(force_str(style_func(msg)))
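# A minimal sketch of how the wrapper above is used (the messages are
# illustrative; note that style_func is only honored when the stream is a
# tty). This helper is never called anywhere; it exists only to illustrate.
def _example_output_wrapper():
    out = OutputWrapper(sys.stdout)
    err = OutputWrapper(sys.stderr, style_func=color_style().ERROR)
    out.write('done')  # a trailing '\n' is appended automatically
    err.write('something went wrong')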
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of being
forcibly set to 'en-us'.
Default value is ``False``.
        Make sure you know what you are doing if you decide to change the
        value of this option in your custom command: if it creates
        locale-sensitive database content that should not contain any
        translations (as happens, e.g., with django.contrib.auth
        permissions), making the locale differ from the de facto default
        'en-us' might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to set the locale needs access to
settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Raise on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
try:
self.execute(*args, **options.__dict__)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# self.stderr is not guaranteed to be set here
stderr = getattr(self, 'stderr', OutputWrapper(sys.stderr, self.style.ERROR))
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``, except if force-skipped).
"""
self.stdout = OutputWrapper(options.get('stdout', sys.stdout))
self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR)
if self.can_import_settings:
from django.conf import settings
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
            # (The final say about whether the i18n machinery is active will be
            # found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Switch to US English, because django-admin.py creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.activate('en-us')
try:
if self.requires_model_validation and not options.get('skip_validation'):
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;"))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found" % (num_errors, '' if num_errors == 1 else 's'))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
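# A minimal sketch of a user-defined command built on BaseCommand. In a real
# project the class would be named ``Command`` and live in
# ``<app>/management/commands/``; the name and behavior here are purely
# illustrative, and nothing registers this class.
class _ExampleGreetCommand(BaseCommand):
    help = 'Print a greeting for each name given on the command line.'
    args = '<name name ...>'
    def handle(self, *args, **options):
        # self.stdout is set up by execute() before handle() is called.
        for name in args:
            self.stdout.write('Hello, %s' % name)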
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
| mit | -5,101,765,490,586,179,000 | 36.706161 | 177 | 0.623492 | false |
datapythonista/pandas | pandas/tests/util/test_validate_args_and_kwargs.py | 8 | 2391 | import pytest
from pandas.util._validators import validate_args_and_kwargs
_fname = "func"
def test_invalid_total_length_max_length_one():
compat_args = ("foo",)
kwargs = {"foo": "FOO"}
args = ("FoO", "BaZ")
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
fr"{_fname}\(\) takes at most {max_length} "
fr"argument \({actual_length} given\)"
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_invalid_total_length_max_length_multiple():
compat_args = ("foo", "bar", "baz")
kwargs = {"foo": "FOO", "bar": "BAR"}
args = ("FoO", "BaZ")
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (
fr"{_fname}\(\) takes at most {max_length} "
fr"arguments \({actual_length} given\)"
)
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
@pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])
def test_missing_args_or_kwargs(args, kwargs):
bad_arg = "bar"
min_fname_arg_count = 2
compat_args = {"foo": -5, bad_arg: 1}
msg = (
fr"the '{bad_arg}' parameter is not supported "
fr"in the pandas implementation of {_fname}\(\)"
)
with pytest.raises(ValueError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_duplicate_argument():
min_fname_arg_count = 2
compat_args = {"foo": None, "bar": None, "baz": None}
kwargs = {"foo": None, "bar": None}
args = (None,) # duplicate value for "foo"
msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = {"foo": 1, "bar": None, "baz": -2}
kwargs = {"baz": -2}
args = (1, None)
min_fname_arg_count = 2
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
| bsd-3-clause | -8,938,038,104,307,669,000 | 28.518519 | 88 | 0.60895 | false |
nanobox-io/git | contrib/hooks/multimail/git_multimail.py | 186 | 110172 | #! /usr/bin/env python2
# Copyright (c) 2015 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
# Derived from contrib/hooks/post-receive-email, which is
# Copyright (c) 2007 Andy Parkins
# and also includes contributions by other authors.
#
# This file is part of git-multimail.
#
# git-multimail is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""Generate notification emails for pushes to a git repository.
This hook sends emails describing changes introduced by pushes to a
git repository. For each reference that was changed, it emits one
ReferenceChange email summarizing how the reference was changed,
followed by one Revision email for each new commit that was introduced
by the reference change.
Each commit is announced in exactly one Revision email. If the same
commit is merged into another branch in the same or a later push, then
the ReferenceChange email will list the commit's SHA1 and its one-line
summary, but no new Revision email will be generated.
This script is designed to be used as a "post-receive" hook in a git
repository (see githooks(5)). It can also be used as an "update"
script, but this usage is not completely reliable and is deprecated.
To help with debugging, this script accepts a --stdout option, which
causes the emails to be written to standard output rather than sent
using sendmail.
See the accompanying README file for the complete documentation.
"""
import sys
import os
import re
import bisect
import socket
import subprocess
import shlex
import optparse
import smtplib
import time
try:
from email.utils import make_msgid
from email.utils import getaddresses
from email.utils import formataddr
from email.utils import formatdate
from email.header import Header
except ImportError:
# Prior to Python 2.5, the email module used different names:
from email.Utils import make_msgid
from email.Utils import getaddresses
from email.Utils import formataddr
from email.Utils import formatdate
from email.Header import Header
DEBUG = False
ZEROS = '0' * 40
LOGBEGIN = '- Log -----------------------------------------------------------------\n'
LOGEND = '-----------------------------------------------------------------------\n'
ADDR_HEADERS = set(['from', 'to', 'cc', 'bcc', 'reply-to', 'sender'])
# It is assumed in many places that the encoding is uniformly UTF-8,
# so changing these constants is unsupported. But define them here
# anyway, to make it easier to find (at least most of) the places
# where the encoding is important.
(ENCODING, CHARSET) = ('UTF-8', 'utf-8')
REF_CREATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s created'
' (now %(newrev_short)s)'
)
REF_UPDATED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated'
' (%(oldrev_short)s -> %(newrev_short)s)'
)
REF_DELETED_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s deleted'
' (was %(oldrev_short)s)'
)
COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = (
'%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s'
)
REFCHANGE_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
Auto-Submitted: auto-generated
"""
REFCHANGE_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a change to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
FOOTER_TEMPLATE = """\
-- \n\
To stop receiving notification emails like this one, please contact
%(administrator)s.
"""
REWIND_ONLY_TEMPLATE = """\
This update removed existing revisions from the reference, leaving the
reference pointing at a previous point in the repository history.
* -- * -- N %(refname)s (%(newrev_short)s)
\\
O -- O -- O (%(oldrev_short)s)
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NON_FF_TEMPLATE = """\
This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
%(refname_type)s are not in the new version. This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:
* -- * -- B -- O -- O -- O (%(oldrev_short)s)
\\
N -- N -- N %(refname)s (%(newrev_short)s)
You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.
Any revisions marked "omits" are not gone; other references still
refer to them. Any revisions marked "discards" are gone forever.
"""
NO_NEW_REVISIONS_TEMPLATE = """\
No new revisions were added by this update.
"""
DISCARDED_REVISIONS_TEMPLATE = """\
This change permanently discards the following revisions:
"""
NO_DISCARDED_REVISIONS_TEMPLATE = """\
The revisions that were on this %(refname_type)s are still contained in
other references; therefore, this change does not discard any commits
from the repository.
"""
NEW_REVISIONS_TEMPLATE = """\
The %(tot)s revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "adds" were already present in the repository and have only
been added to this reference.
"""
TAG_CREATED_TEMPLATE = """\
at %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_UPDATED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was modified! ***
from %(oldrev_short)-9s (%(oldrev_type)s)
to %(newrev_short)-9s (%(newrev_type)s)
"""
TAG_DELETED_TEMPLATE = """\
*** WARNING: tag %(short_refname)s was deleted! ***
"""
# The template used in summary tables. It looks best if this uses the
# same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE.
BRIEF_SUMMARY_TEMPLATE = """\
%(action)10s %(rev_short)-9s %(text)s
"""
NON_COMMIT_UPDATE_TEMPLATE = """\
This is an unusual reference change because the reference did not
refer to a commit either before or after the change. We do not know
how to provide full information about this reference change.
"""
REVISION_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Cc: %(cc_recipients)s
Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
From: %(fromaddr)s
Reply-To: %(reply_to)s
In-Reply-To: %(reply_to_msgid)s
References: %(reply_to_msgid)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
REVISION_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
# Combined, meaning refchange+revision email (for single-commit additions)
COMBINED_HEADER_TEMPLATE = """\
Date: %(send_date)s
To: %(recipients)s
Subject: %(subject)s
MIME-Version: 1.0
Content-Type: text/plain; charset=%(charset)s
Content-Transfer-Encoding: 8bit
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
X-Git-Reftype: %(refname_type)s
X-Git-Oldrev: %(oldrev)s
X-Git-Newrev: %(newrev)s
X-Git-Rev: %(rev)s
Auto-Submitted: auto-generated
"""
COMBINED_INTRO_TEMPLATE = """\
This is an automated email from the git hooks/post-receive script.
%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
in repository %(repo_shortname)s.
"""
COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE
class CommandError(Exception):
def __init__(self, cmd, retcode):
self.cmd = cmd
self.retcode = retcode
Exception.__init__(
self,
'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,)
)
class ConfigurationException(Exception):
pass
# The "git" program (this could be changed to include a full path):
GIT_EXECUTABLE = 'git'
# How "git" should be invoked (including global arguments), as a list
# of words. This variable is usually initialized automatically by
# read_git_output() via choose_git_command(), but if a value is set
# here then it will be used unconditionally.
GIT_CMD = None
def choose_git_command():
"""Decide how to invoke git, and record the choice in GIT_CMD."""
global GIT_CMD
if GIT_CMD is None:
try:
# Check to see whether the "-c" option is accepted (it was
# only added in Git 1.7.2). We don't actually use the
# output of "git --version", though if we needed more
# specific version information this would be the place to
# do it.
cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version']
read_output(cmd)
GIT_CMD = [GIT_EXECUTABLE, '-c', 'i18n.logoutputencoding=%s' % (ENCODING,)]
except CommandError:
GIT_CMD = [GIT_EXECUTABLE]
def read_git_output(args, input=None, keepends=False, **kw):
"""Read the output of a Git command."""
if GIT_CMD is None:
choose_git_command()
return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw)
def read_output(cmd, input=None, keepends=False, **kw):
if input:
stdin = subprocess.PIPE
else:
stdin = None
p = subprocess.Popen(
cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw
)
(out, err) = p.communicate(input)
retcode = p.wait()
if retcode:
raise CommandError(cmd, retcode)
if not keepends:
out = out.rstrip('\n\r')
return out
def read_git_lines(args, keepends=False, **kw):
"""Return the lines output by Git command.
Return as single lines, with newlines stripped off."""
return read_git_output(args, keepends=True, **kw).splitlines(keepends)
def git_rev_list_ish(cmd, spec, args=None, **kw):
"""Common functionality for invoking a 'git rev-list'-like command.
Parameters:
* cmd is the Git command to run, e.g., 'rev-list' or 'log'.
* spec is a list of revision arguments to pass to the named
command. If None, this function returns an empty list.
* args is a list of extra arguments passed to the named command.
* All other keyword arguments (if any) are passed to the
underlying read_git_lines() function.
Return the output of the Git command in the form of a list, one
entry per output line.
"""
if spec is None:
return []
if args is None:
args = []
args = [cmd, '--stdin'] + args
spec_stdin = ''.join(s + '\n' for s in spec)
return read_git_lines(args, input=spec_stdin, **kw)
def git_rev_list(spec, **kw):
"""Run 'git rev-list' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('rev-list', spec, **kw)
def git_log(spec, **kw):
"""Run 'git log' with the given list of revision arguments.
See git_rev_list_ish() for parameter and return value
documentation.
"""
return git_rev_list_ish('log', spec, **kw)
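# Example (the revision spec is illustrative; running this requires a Git
# repository, since the spec is fed to 'git rev-list --stdin'):
#
#   git_rev_list(['master..topic'])
#   # -> ['<sha1>', '<sha1>', ...], one entry per commit in the range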
def header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field."""
try:
if isinstance(text, str):
text = text.decode(ENCODING, 'replace')
return Header(text, header_name=header_name).encode()
except UnicodeEncodeError:
return Header(text, header_name=header_name, charset=CHARSET,
errors='replace').encode()
def addr_header_encode(text, header_name=None):
"""Encode and line-wrap the value of an email header field containing
email addresses."""
return Header(
', '.join(
formataddr((header_encode(name), emailaddr))
for name, emailaddr in getaddresses([text])
),
header_name=header_name
).encode()
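# Example (the address is illustrative): a non-ASCII display name is
# RFC 2047 encoded while the address part is left intact:
#
#   addr_header_encode('Ren\xc3\xa9 Levy <rene@example.org>', 'To')
#   # -> something like '=?utf-8?...?= <rene@example.org>'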
class Config(object):
def __init__(self, section, git_config=None):
"""Represent a section of the git configuration.
If git_config is specified, it is passed to "git config" in
the GIT_CONFIG environment variable, meaning that "git config"
will read the specified path rather than the Git default
config paths."""
self.section = section
if git_config:
self.env = os.environ.copy()
self.env['GIT_CONFIG'] = git_config
else:
self.env = None
@staticmethod
def _split(s):
"""Split NUL-terminated values."""
words = s.split('\0')
assert words[-1] == ''
return words[:-1]
def get(self, name, default=None):
try:
values = self._split(read_git_output(
['config', '--get', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
assert len(values) == 1
return values[0]
except CommandError:
return default
def get_bool(self, name, default=None):
try:
value = read_git_output(
['config', '--get', '--bool', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError:
return default
return value == 'true'
def get_all(self, name, default=None):
"""Read a (possibly multivalued) setting from the configuration.
Return the result as a list of values, or default if the name
is unset."""
try:
return self._split(read_git_output(
['config', '--get-all', '--null', '%s.%s' % (self.section, name)],
env=self.env, keepends=True,
))
except CommandError, e:
if e.retcode == 1:
# "the section or key is invalid"; i.e., there is no
# value for the specified key.
return default
else:
raise
def get_recipients(self, name, default=None):
"""Read a recipients list from the configuration.
Return the result as a comma-separated list of email
addresses, or default if the option is unset. If the setting
has multiple values, concatenate them with comma separators."""
lines = self.get_all(name, default=None)
if lines is None:
return default
return ', '.join(line.strip() for line in lines)
def set(self, name, value):
read_git_output(
['config', '%s.%s' % (self.section, name), value],
env=self.env,
)
def add(self, name, value):
read_git_output(
['config', '--add', '%s.%s' % (self.section, name), value],
env=self.env,
)
def __contains__(self, name):
return self.get_all(name, default=None) is not None
# We don't use this method anymore internally, but keep it here in
# case somebody is calling it from their own code:
def has_key(self, name):
return name in self
def unset_all(self, name):
try:
read_git_output(
['config', '--unset-all', '%s.%s' % (self.section, name)],
env=self.env,
)
except CommandError, e:
if e.retcode == 5:
# The name doesn't exist, which is what we wanted anyway...
pass
else:
raise
def set_recipients(self, name, value):
self.unset_all(name)
for pair in getaddresses([value]):
self.add(name, formataddr(pair))
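# Usage sketch ("multimailhook" is the section this script actually reads;
# the option names shown are ones this hook defines):
#
#   config = Config('multimailhook')
#   recipients = config.get_recipients('mailinglist', default='')
#   shortlog = config.get_bool('announceshortlog', default=False)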
def generate_summaries(*log_args):
"""Generate a brief summary for each revision requested.
log_args are strings that will be passed directly to "git log" as
revision selectors. Iterate over (sha1_short, subject) for each
commit specified by log_args (subject is the first line of the
commit message as a string without EOLs)."""
cmd = [
'log', '--abbrev', '--format=%h %s',
] + list(log_args) + ['--']
for line in read_git_lines(cmd):
yield tuple(line.split(' ', 1))
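# Example (the SHA1 and subject shown are illustrative):
#
#   for sha1_short, subject in generate_summaries('--no-walk', 'HEAD'):
#       print sha1_short, subject   # e.g. "1a2b3c4 Fix typo in README"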
def limit_lines(lines, max_lines):
for (index, line) in enumerate(lines):
if index < max_lines:
yield line
if index >= max_lines:
yield '... %d lines suppressed ...\n' % (index + 1 - max_lines,)
def limit_linelength(lines, max_linelength):
for line in lines:
# Don't forget that lines always include a trailing newline.
if len(line) > max_linelength + 1:
line = line[:max_linelength - 7] + ' [...]\n'
yield line
class CommitSet(object):
"""A (constant) set of object names.
The set should be initialized with full SHA1 object names. The
__contains__() method returns True iff its argument is an
abbreviation of any the names in the set."""
def __init__(self, names):
self._names = sorted(names)
def __len__(self):
return len(self._names)
def __contains__(self, sha1_abbrev):
"""Return True iff this set contains sha1_abbrev (which might be abbreviated)."""
i = bisect.bisect_left(self._names, sha1_abbrev)
return i < len(self) and self._names[i].startswith(sha1_abbrev)
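# Example (the names are illustrative; real entries are full 40-character
# SHA1s, so the padding below just fakes that):
#
#   commits = CommitSet(['1a2b3c4d' + '0' * 32, 'deadbeef' + '0' * 32])
#   '1a2b3c' in commits    # True -- prefix match found via bisect
#   'ffff' in commits      # False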
class GitObject(object):
def __init__(self, sha1, type=None):
if sha1 == ZEROS:
self.sha1 = self.type = self.commit_sha1 = None
else:
self.sha1 = sha1
self.type = type or read_git_output(['cat-file', '-t', self.sha1])
if self.type == 'commit':
self.commit_sha1 = self.sha1
elif self.type == 'tag':
try:
self.commit_sha1 = read_git_output(
['rev-parse', '--verify', '%s^0' % (self.sha1,)]
)
except CommandError:
# Cannot deref tag to determine commit_sha1
self.commit_sha1 = None
else:
self.commit_sha1 = None
self.short = read_git_output(['rev-parse', '--short', sha1])
def get_summary(self):
"""Return (sha1_short, subject) for this commit."""
if not self.sha1:
raise ValueError('Empty commit has no summary')
return iter(generate_summaries('--no-walk', self.sha1)).next()
def __eq__(self, other):
return isinstance(other, GitObject) and self.sha1 == other.sha1
def __hash__(self):
return hash(self.sha1)
def __nonzero__(self):
return bool(self.sha1)
def __str__(self):
return self.sha1 or ZEROS
class Change(object):
"""A Change that has been made to the Git repository.
Abstract class from which both Revisions and ReferenceChanges are
derived. A Change knows how to generate a notification email
describing itself."""
def __init__(self, environment):
self.environment = environment
self._values = None
def _compute_values(self):
"""Return a dictionary {keyword: expansion} for this Change.
Derived classes overload this method to add more entries to
the return value. This method is used internally by
get_values(). The return value should always be a new
dictionary."""
return self.environment.get_values()
def get_values(self, **extra_values):
"""Return a dictionary {keyword: expansion} for this Change.
Return a dictionary mapping keywords to the values that they
should be expanded to for this Change (used when interpolating
template strings). If any keyword arguments are supplied, add
those to the return value as well. The return value is always
a new dictionary."""
if self._values is None:
self._values = self._compute_values()
values = self._values.copy()
if extra_values:
values.update(extra_values)
return values
def expand(self, template, **extra_values):
"""Expand template.
Expand the template (which should be a string) using string
interpolation of the values for this Change. If any keyword
arguments are provided, also include those in the keywords
available for interpolation."""
return template % self.get_values(**extra_values)
def expand_lines(self, template, **extra_values):
"""Break template into lines and expand each line."""
values = self.get_values(**extra_values)
for line in template.splitlines(True):
yield line % values
def expand_header_lines(self, template, **extra_values):
"""Break template into lines and expand each line as an RFC 2822 header.
Encode values and split up lines that are too long. Silently
skip lines that contain references to unknown variables."""
values = self.get_values(**extra_values)
for line in template.splitlines():
(name, value) = line.split(':', 1)
try:
value = value % values
except KeyError, e:
if DEBUG:
self.environment.log_warning(
'Warning: unknown variable %r in the following line; line skipped:\n'
' %s\n'
% (e.args[0], line,)
)
else:
if name.lower() in ADDR_HEADERS:
value = addr_header_encode(value, name)
else:
value = header_encode(value, name)
for splitline in ('%s: %s\n' % (name, value)).splitlines(True):
yield splitline
def generate_email_header(self):
"""Generate the RFC 2822 email headers for this Change, a line at a time.
The output should not include the trailing blank line."""
raise NotImplementedError()
def generate_email_intro(self):
"""Generate the email intro for this Change, a line at a time.
The output will be used as the standard boilerplate at the top
of the email body."""
raise NotImplementedError()
def generate_email_body(self):
"""Generate the main part of the email body, a line at a time.
The text in the body might be truncated after a specified
number of lines (see multimailhook.emailmaxlines)."""
raise NotImplementedError()
def generate_email_footer(self):
"""Generate the footer of the email, a line at a time.
The footer is always included, irrespective of
multimailhook.emailmaxlines."""
raise NotImplementedError()
def generate_email(self, push, body_filter=None, extra_header_values={}):
"""Generate an email describing this change.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
future (e.g. passing extra values to generate_email_intro()"""
for line in self.generate_email_header(**extra_header_values):
yield line
yield '\n'
for line in self.generate_email_intro():
yield line
body = self.generate_email_body(push)
if body_filter is not None:
body = body_filter(body)
for line in body:
yield line
for line in self.generate_email_footer():
yield line
class Revision(Change):
"""A Change consisting of a single git commit."""
CC_RE = re.compile(r'^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$')
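    # Commit-message lines matched by CC_RE look like the following (the
    # address is illustrative); an optional trailing "# comment" is ignored:
    #
    #   CC: Jane Reviewer <jane@example.com>  # requested review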
def __init__(self, reference_change, rev, num, tot):
Change.__init__(self, reference_change.environment)
self.reference_change = reference_change
self.rev = rev
self.change_type = self.reference_change.change_type
self.refname = self.reference_change.refname
self.num = num
self.tot = tot
self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
self.recipients = self.environment.get_revision_recipients(self)
self.cc_recipients = ''
if self.environment.get_scancommitforcc():
self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
if self.cc_recipients:
self.environment.log_msg(
'Add %s to CC for %s\n' % (self.cc_recipients, self.rev.sha1))
def _cc_recipients(self):
cc_recipients = []
message = read_git_output(['log', '--no-walk', '--format=%b', self.rev.sha1])
lines = message.strip().split('\n')
for line in lines:
m = re.match(self.CC_RE, line)
if m:
cc_recipients.append(m.group('to'))
return cc_recipients
def _compute_values(self):
values = Change._compute_values(self)
oneline = read_git_output(
['log', '--format=%s', '--no-walk', self.rev.sha1]
)
values['rev'] = self.rev.sha1
values['rev_short'] = self.rev.short
values['change_type'] = self.change_type
values['refname'] = self.refname
values['short_refname'] = self.reference_change.short_refname
values['refname_type'] = self.reference_change.refname_type
values['reply_to_msgid'] = self.reference_change.msgid
values['num'] = self.num
values['tot'] = self.tot
values['recipients'] = self.recipients
if self.cc_recipients:
values['cc_recipients'] = self.cc_recipients
values['oneline'] = oneline
values['author'] = self.author
reply_to = self.environment.get_reply_to_commit(self)
if reply_to:
values['reply_to'] = reply_to
return values
def generate_email_header(self, **extra_values):
for line in self.expand_header_lines(
REVISION_HEADER_TEMPLATE, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(REVISION_INTRO_TEMPLATE):
yield line
def generate_email_body(self, push):
"""Show this revision."""
return read_git_lines(
['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
keepends=True,
)
def generate_email_footer(self):
return self.expand_lines(REVISION_FOOTER_TEMPLATE)
class ReferenceChange(Change):
"""A Change to a Git reference.
An abstract class representing a create, update, or delete of a
Git reference. Derived classes handle specific types of reference
(e.g., tags vs. branches). These classes generate the main
reference change email summarizing the reference change and
    whether it caused any commits to be added or removed.
ReferenceChange objects are usually created using the static
create() method, which has the logic to decide which derived class
to instantiate."""
REF_RE = re.compile(r'^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$')
@staticmethod
def create(environment, oldrev, newrev, refname):
"""Return a ReferenceChange object representing the change.
Return an object that represents the type of change that is being
made. oldrev and newrev should be SHA1s or ZEROS."""
old = GitObject(oldrev)
new = GitObject(newrev)
rev = new or old
        # The revision type tells us what type the object is; combined
        # with the location of the ref, it lets us decide between:
# - working branch
# - tracking branch
# - unannotated tag
# - annotated tag
m = ReferenceChange.REF_RE.match(refname)
if m:
area = m.group('area')
short_refname = m.group('shortname')
else:
area = ''
short_refname = refname
if rev.type == 'tag':
# Annotated tag:
klass = AnnotatedTagChange
elif rev.type == 'commit':
if area == 'tags':
# Non-annotated tag:
klass = NonAnnotatedTagChange
elif area == 'heads':
# Branch:
klass = BranchChange
elif area == 'remotes':
# Tracking branch:
environment.log_warning(
'*** Push-update of tracking branch %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Some other reference namespace:
environment.log_warning(
'*** Push-update of strange reference %r\n'
'*** - incomplete email generated.\n'
% (refname,)
)
klass = OtherReferenceChange
else:
# Anything else (is there anything else?)
environment.log_warning(
'*** Unknown type of update to %r (%s)\n'
'*** - incomplete email generated.\n'
% (refname, rev.type,)
)
klass = OtherReferenceChange
return klass(
environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
def __init__(self, environment, refname, short_refname, old, new, rev):
Change.__init__(self, environment)
self.change_type = {
(False, True): 'create',
(True, True): 'update',
(True, False): 'delete',
}[bool(old), bool(new)]
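# Illustrative mapping: creating a reference gives a falsy old (the
# zero SHA-1) and a set new, so change_type == 'create'; deleting it
# gives (True, False) and hence 'delete'.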
self.refname = refname
self.short_refname = short_refname
self.old = old
self.new = new
self.rev = rev
self.msgid = make_msgid()
self.diffopts = environment.diffopts
self.graphopts = environment.graphopts
self.logopts = environment.logopts
self.commitlogopts = environment.commitlogopts
self.showgraph = environment.refchange_showgraph
self.showlog = environment.refchange_showlog
self.header_template = REFCHANGE_HEADER_TEMPLATE
self.intro_template = REFCHANGE_INTRO_TEMPLATE
self.footer_template = FOOTER_TEMPLATE
def _compute_values(self):
values = Change._compute_values(self)
values['change_type'] = self.change_type
values['refname_type'] = self.refname_type
values['refname'] = self.refname
values['short_refname'] = self.short_refname
values['msgid'] = self.msgid
values['recipients'] = self.recipients
values['oldrev'] = str(self.old)
values['oldrev_short'] = self.old.short
values['newrev'] = str(self.new)
values['newrev_short'] = self.new.short
if self.old:
values['oldrev_type'] = self.old.type
if self.new:
values['newrev_type'] = self.new.type
reply_to = self.environment.get_reply_to_refchange(self)
if reply_to:
values['reply_to'] = reply_to
return values
def send_single_combined_email(self, known_added_sha1s):
"""Determine if a combined refchange/revision email should be sent
If there is only a single new (non-merge) commit added by a
change, it is useful to combine the ReferenceChange and
Revision emails into one. In such a case, return the single
revision; otherwise, return None.
This method is overridden in BranchChange."""
return None
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
"""Generate an email describing this change AND specified revision.
Iterate over the lines (including the header lines) of an
email describing this change. If body_filter is not None,
then use it to filter the lines that are intended for the
email body.
The extra_header_values field is received as a dict and not as
**kwargs, to allow passing other keyword arguments in the
future (e.g. passing extra values to generate_email_intro()).
This method is overridden in BranchChange."""
raise NotImplementedError
def get_subject(self):
template = {
'create': REF_CREATED_SUBJECT_TEMPLATE,
'update': REF_UPDATED_SUBJECT_TEMPLATE,
'delete': REF_DELETED_SUBJECT_TEMPLATE,
}[self.change_type]
return self.expand(template)
def generate_email_header(self, **extra_values):
if 'subject' not in extra_values:
extra_values['subject'] = self.get_subject()
for line in self.expand_header_lines(
self.header_template, **extra_values
):
yield line
def generate_email_intro(self):
for line in self.expand_lines(self.intro_template):
yield line
def generate_email_body(self, push):
"""Call the appropriate body-generation routine.
Call one of generate_create_summary() /
generate_update_summary() / generate_delete_summary()."""
change_summary = {
'create': self.generate_create_summary,
'delete': self.generate_delete_summary,
'update': self.generate_update_summary,
}[self.change_type](push)
for line in change_summary:
yield line
for line in self.generate_revision_change_summary(push):
yield line
def generate_email_footer(self):
return self.expand_lines(self.footer_template)
def generate_revision_change_graph(self, push):
if self.showgraph:
args = ['--graph'] + self.graphopts
for newold in ('new', 'old'):
has_newold = False
spec = push.get_commits_spec(newold, self)
for line in git_log(spec, args=args, keepends=True):
if not has_newold:
has_newold = True
yield '\n'
yield 'Graph of %s commits:\n\n' % (
{'new': 'new', 'old': 'discarded'}[newold],)
yield ' ' + line
if has_newold:
yield '\n'
def generate_revision_change_log(self, new_commits_list):
if self.showlog:
yield '\n'
yield 'Detailed log of new commits:\n\n'
for line in read_git_lines(
['log', '--no-walk']
+ self.logopts
+ new_commits_list
+ ['--'],
keepends=True,
):
yield line
def generate_new_revision_summary(self, tot, new_commits_list, push):
for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
yield line
for line in self.generate_revision_change_graph(push):
yield line
for line in self.generate_revision_change_log(new_commits_list):
yield line
def generate_revision_change_summary(self, push):
"""Generate a summary of the revisions added/removed by this change."""
if self.new.commit_sha1 and not self.old.commit_sha1:
# A new reference was created. List the new revisions
# brought by the new reference (i.e., those revisions that
# were not in the repository before this reference
# change).
sha1s = list(push.get_new_commits(self))
sha1s.reverse()
tot = len(sha1s)
new_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if new_revisions:
yield self.expand('This %(refname_type)s includes the following new commits:\n')
yield '\n'
for r in new_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='new', text=subject,
)
yield '\n'
for line in self.generate_new_revision_summary(
tot, [r.rev.sha1 for r in new_revisions], push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
elif self.new.commit_sha1 and self.old.commit_sha1:
# A reference was changed to point at a different commit.
# List the revisions that were removed and/or added *from
# that reference* by this reference change, along with a
# diff between the trees for its old and new values.
# List of the revisions that were added to the branch by
# this update. Note this list can include revisions that
# have already had notification emails; we want such
# revisions in the summary even though we will not send
# new notification emails for them.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
# List of the revisions that were removed from the branch
# by this update. This will be empty except for
# non-fast-forward updates.
discards = list(generate_summaries(
'%s..%s' % (self.new.commit_sha1, self.old.commit_sha1,)
))
if adds:
new_commits_list = push.get_new_commits(self)
else:
new_commits_list = []
new_commits = CommitSet(new_commits_list)
if discards:
discarded_commits = CommitSet(push.get_discarded_commits(self))
else:
discarded_commits = CommitSet([])
if discards and adds:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(NON_FF_TEMPLATE):
yield line
elif discards:
for (sha1, subject) in discards:
if sha1 in discarded_commits:
action = 'discards'
else:
action = 'omits'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
for line in self.expand_lines(REWIND_ONLY_TEMPLATE):
yield line
elif adds:
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='from',
rev_short=sha1, text=subject,
)
for (sha1, subject) in adds:
if sha1 in new_commits:
action = 'new'
else:
action = 'adds'
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action=action,
rev_short=sha1, text=subject,
)
yield '\n'
if new_commits:
for line in self.generate_new_revision_summary(
len(new_commits), new_commits_list, push):
yield line
else:
for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
yield line
for line in self.generate_revision_change_graph(push):
yield line
# The diffstat is shown from the old revision to the new
# revision. This is to show the truth of what happened in
# this change. There's no point showing the stat from the
# base to the new revision because the base is effectively a
# random revision at this point - the user will be interested
# in what this revision changed - including the undoing of
# previous revisions in the case of non-fast-forward updates.
yield '\n'
yield 'Summary of changes:\n'
for line in read_git_lines(
['diff-tree']
+ self.diffopts
+ ['%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,)],
keepends=True,
):
yield line
elif self.old.commit_sha1 and not self.new.commit_sha1:
# A reference was deleted. List the revisions that were
# removed from the repository by this reference change.
sha1s = list(push.get_discarded_commits(self))
tot = len(sha1s)
discarded_revisions = [
Revision(self, GitObject(sha1), num=i + 1, tot=tot)
for (i, sha1) in enumerate(sha1s)
]
if discarded_revisions:
for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE):
yield line
yield '\n'
for r in discarded_revisions:
(sha1, subject) = r.rev.get_summary()
yield r.expand(
BRIEF_SUMMARY_TEMPLATE, action='discards', text=subject,
)
for line in self.generate_revision_change_graph(push):
yield line
else:
for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE):
yield line
elif not self.old.commit_sha1 and not self.new.commit_sha1:
for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE):
yield line
def generate_create_summary(self, push):
"""Called for the creation of a reference."""
# This is a new reference and so oldrev is not valid
(sha1, subject) = self.new.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='at',
rev_short=sha1, text=subject,
)
yield '\n'
def generate_update_summary(self, push):
"""Called for the change of a pre-existing branch."""
return iter([])
def generate_delete_summary(self, push):
"""Called for the deletion of any type of reference."""
(sha1, subject) = self.old.get_summary()
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='was',
rev_short=sha1, text=subject,
)
yield '\n'
class BranchChange(ReferenceChange):
refname_type = 'branch'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
self._single_revision = None
def send_single_combined_email(self, known_added_sha1s):
if not self.environment.combine_when_single_commit:
return None
# In the sadly-all-too-frequent use case of people pushing only
# one of their commits at a time to a repository, users feel
# the reference change summary emails are noise rather than
# important signal. This is because, in this particular
# use case, there is a reference change summary email for each
# new commit, and all these summaries do is point out that
# there is one new commit (which can readily be inferred by
# the existence of the individual revision email that is also
# sent). In such cases, our users prefer there to be a combined
# reference change summary/new revision email.
#
# So, if the change is an update and it doesn't discard any
# commits, and it adds exactly one non-merge commit (gerrit
# forces a workflow where every commit is individually merged
# and the git-multimail hook fired off for just this one
# change), then we send a combined refchange/revision email.
try:
# If this change is a reference update that doesn't discard
# any commits...
if self.change_type != 'update':
return None
if read_git_lines(
['merge-base', self.old.sha1, self.new.sha1]
) != [self.old.sha1]:
return None
# Check if this update introduced exactly one non-merge
# commit:
def split_line(line):
"""Split line into (sha1, [parent,...])."""
words = line.split()
return (words[0], words[1:])
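# e.g. (hypothetical SHA-1s) split_line('c0ffee11 beef0001 beef0002')
# returns ('c0ffee11', ['beef0001', 'beef0002']).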
# Get the new commits introduced by the push as a list of
# (sha1, [parent,...])
new_commits = [
split_line(line)
for line in read_git_lines(
[
'log', '-3', '--format=%H %P',
'%s..%s' % (self.old.sha1, self.new.sha1),
]
)
]
if not new_commits:
return None
# If the newest commit is a merge, save it for a later check
# but otherwise ignore it
merge = None
tot = len(new_commits)
if len(new_commits[0][1]) > 1:
merge = new_commits[0][0]
del new_commits[0]
# Our primary check: we can't combine if more than one commit
# is introduced. We also currently only combine if the new
# commit is a non-merge commit, though it may make sense to
# combine if it is a merge as well.
if not (
len(new_commits) == 1
and len(new_commits[0][1]) == 1
and new_commits[0][0] in known_added_sha1s
):
return None
# We do not want to combine revision and refchange emails if
# those go to separate locations.
rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
if rev.recipients != self.recipients:
return None
# We ignored the newest commit if it was just a merge of the one
# commit being introduced. But we don't want to ignore that
# merge commit if it involved conflict resolutions. Check that.
if merge and merge != read_git_output(['diff-tree', '--cc', merge]):
return None
# We can combine the refchange and one new revision emails
# into one. Return the Revision that a combined email should
# be sent about.
return rev
except CommandError:
# Cannot determine number of commits in old..new or new..old;
# don't combine reference/revision emails:
return None
def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
values = revision.get_values()
if extra_header_values:
values.update(extra_header_values)
if 'subject' not in extra_header_values:
values['subject'] = self.expand(COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values)
self._single_revision = revision
self.header_template = COMBINED_HEADER_TEMPLATE
self.intro_template = COMBINED_INTRO_TEMPLATE
self.footer_template = COMBINED_FOOTER_TEMPLATE
for line in self.generate_email(push, body_filter, values):
yield line
def generate_email_body(self, push):
'''Call the appropriate body generation routine.
If this is a combined refchange/revision email, the special logic
for handling this combined email comes from this function. For
other cases, we just use the normal handling.'''
# If self._single_revision isn't set; don't override
if not self._single_revision:
for line in super(BranchChange, self).generate_email_body(push):
yield line
return
# This is a combined refchange/revision email; we first provide
# some info from the refchange portion, and then call the revision
# generate_email_body function to handle the revision portion.
adds = list(generate_summaries(
'--topo-order', '--reverse', '%s..%s'
% (self.old.commit_sha1, self.new.commit_sha1,)
))
yield self.expand("The following commit(s) were added to %(refname)s by this push:\n")
for (sha1, subject) in adds:
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='new',
rev_short=sha1, text=subject,
)
yield self._single_revision.rev.short + " is described below\n"
yield '\n'
for line in self._single_revision.generate_email_body(push):
yield line
class AnnotatedTagChange(ReferenceChange):
refname_type = 'annotated tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_announce_recipients(self)
self.show_shortlog = environment.announce_show_shortlog
ANNOTATED_TAG_FORMAT = (
'%(*objectname)\n'
'%(*objecttype)\n'
'%(taggername)\n'
'%(taggerdate)'
)
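# With the format above, 'git for-each-ref' emits four lines per tag:
# the tagged object's SHA-1, its type, the tagger's name, and the tag
# date; e.g. (hypothetical output):
#     1234abcd...
#     commit
#     Jane Tagger
#     Mon Jan 1 00:00:00 2001 +0000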
def describe_tag(self, push):
"""Describe the new value of an annotated tag."""
# Use git for-each-ref to pull out the individual fields from
# the tag
[tagobject, tagtype, tagger, tagged] = read_git_lines(
['for-each-ref', '--format=%s' % (self.ANNOTATED_TAG_FORMAT,), self.refname],
)
yield self.expand(
BRIEF_SUMMARY_TEMPLATE, action='tagging',
rev_short=tagobject, text='(%s)' % (tagtype,),
)
if tagtype == 'commit':
# If the tagged object is a commit, then we assume this is a
# release, and so we calculate which tag this tag is
# replacing
try:
prevtag = read_git_output(['describe', '--abbrev=0', '%s^' % (self.new,)])
except CommandError:
prevtag = None
if prevtag:
yield ' replaces %s\n' % (prevtag,)
else:
prevtag = None
yield ' length %s bytes\n' % (read_git_output(['cat-file', '-s', tagobject]),)
yield ' tagged by %s\n' % (tagger,)
yield ' on %s\n' % (tagged,)
yield '\n'
# Show the content of the tag message; this might contain a
# change log or release notes so is worth displaying.
yield LOGBEGIN
contents = list(read_git_lines(['cat-file', 'tag', self.new.sha1], keepends=True))
contents = contents[contents.index('\n') + 1:]
if contents and contents[-1][-1:] != '\n':
contents.append('\n')
for line in contents:
yield line
if self.show_shortlog and tagtype == 'commit':
# Only commit tags make sense to have rev-list operations
# performed on them
yield '\n'
if prevtag:
# Show changes since the previous release
revlist = read_git_output(
['rev-list', '--pretty=short', '%s..%s' % (prevtag, self.new,)],
keepends=True,
)
else:
# No previous tag, show all the changes since time
# began
revlist = read_git_output(
['rev-list', '--pretty=short', '%s' % (self.new,)],
keepends=True,
)
for line in read_git_lines(['shortlog'], input=revlist, keepends=True):
yield line
yield LOGEND
yield '\n'
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_update_summary(self, push):
"""Called for the update of an annotated tag.
This is probably a rare event and may not even be allowed."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
for line in self.describe_tag(push):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
yield self.expand(' tag was %(oldrev_short)s\n')
yield '\n'
class NonAnnotatedTagChange(ReferenceChange):
refname_type = 'tag'
def __init__(self, environment, refname, short_refname, old, new, rev):
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=short_refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
def generate_create_summary(self, push):
"""Called for the creation of an annotated tag."""
for line in self.expand_lines(TAG_CREATED_TEMPLATE):
yield line
def generate_update_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
yield line
def generate_delete_summary(self, push):
"""Called when a non-annotated reference is updated."""
for line in self.expand_lines(TAG_DELETED_TEMPLATE):
yield line
for line in ReferenceChange.generate_delete_summary(self, push):
yield line
class OtherReferenceChange(ReferenceChange):
refname_type = 'reference'
def __init__(self, environment, refname, short_refname, old, new, rev):
# We use the full refname as short_refname, because otherwise
# the full name of the reference would not be obvious from the
# text of the email.
ReferenceChange.__init__(
self, environment,
refname=refname, short_refname=refname,
old=old, new=new, rev=rev,
)
self.recipients = environment.get_refchange_recipients(self)
class Mailer(object):
"""An object that can send emails."""
def send(self, lines, to_addrs):
"""Send an email consisting of lines.
lines must be an iterable over the lines constituting the
header and body of the email. to_addrs is a list of recipient
addresses (can be needed even if lines already contains a
"To:" field). It can be either a string (comma-separated list
of email addresses) or a Python list of individual email
addresses.
"""
raise NotImplementedError()
class SendMailer(Mailer):
"""Send emails using 'sendmail -oi -t'."""
SENDMAIL_CANDIDATES = [
'/usr/sbin/sendmail',
'/usr/lib/sendmail',
]
@staticmethod
def find_sendmail():
for path in SendMailer.SENDMAIL_CANDIDATES:
if os.access(path, os.X_OK):
return path
else:
raise ConfigurationException(
'No sendmail executable found. '
'Try setting multimailhook.sendmailCommand.'
)
def __init__(self, command=None, envelopesender=None):
"""Construct a SendMailer instance.
command should be the command and arguments used to invoke
sendmail, as a list of strings. If an envelopesender is
provided, it will also be passed to the command, via '-f
envelopesender'."""
if command:
self.command = command[:]
else:
self.command = [self.find_sendmail(), '-oi', '-t']
if envelopesender:
self.command.extend(['-f', envelopesender])
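# e.g. with an envelopesender configured (hypothetical address), the
# resulting command is
#     ['/usr/sbin/sendmail', '-oi', '-t', '-f', 'pusher@example.com']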
def send(self, lines, to_addrs):
try:
p = subprocess.Popen(self.command, stdin=subprocess.PIPE)
except OSError, e:
sys.stderr.write(
'*** Cannot execute command: %s\n' % ' '.join(self.command)
+ '*** %s\n' % str(e)
+ '*** Try setting multimailhook.mailer to "smtp"\n'
'*** to send emails without using the sendmail command.\n'
)
sys.exit(1)
try:
p.stdin.writelines(lines)
except Exception, e:
sys.stderr.write(
'*** Error while generating commit email\n'
'*** - mail sending aborted.\n'
)
try:
# subprocess.terminate() is not available in Python 2.4
p.terminate()
except AttributeError:
pass
raise e
else:
p.stdin.close()
retcode = p.wait()
if retcode:
raise CommandError(self.command, retcode)
class SMTPMailer(Mailer):
"""Send emails using Python's smtplib."""
def __init__(self, envelopesender, smtpserver,
smtpservertimeout=10.0, smtpserverdebuglevel=0,
smtpencryption='none',
smtpuser='', smtppass='',
):
if not envelopesender:
sys.stderr.write(
'fatal: git_multimail: cannot use SMTPMailer without a sender address.\n'
'please set either multimailhook.envelopeSender or user.email\n'
)
sys.exit(1)
if smtpencryption == 'ssl' and not (smtpuser and smtppass):
raise ConfigurationException(
'Cannot use SMTPMailer with security option ssl '
'without options username and password.'
)
self.envelopesender = envelopesender
self.smtpserver = smtpserver
self.smtpservertimeout = smtpservertimeout
self.smtpserverdebuglevel = smtpserverdebuglevel
self.security = smtpencryption
self.username = smtpuser
self.password = smtppass
try:
def call(klass, server, timeout):
try:
return klass(server, timeout=timeout)
except TypeError:
# Old Python versions do not have the timeout= argument.
return klass(server)
if self.security == 'none':
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'ssl':
self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'tls':
if ':' not in self.smtpserver:
self.smtpserver += ':587' # default port for TLS
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.ehlo()
else:
sys.stderr.write('*** Error: Control reached an invalid option. ***\n')
sys.exit(1)
if self.smtpserverdebuglevel > 0:
sys.stdout.write(
"*** Setting debug on for SMTP server connection (%s) ***\n"
% self.smtpserverdebuglevel)
self.smtp.set_debuglevel(self.smtpserverdebuglevel)
except Exception, e:
sys.stderr.write(
'*** Error establishing SMTP connection to %s ***\n'
% self.smtpserver)
sys.stderr.write('*** %s\n' % str(e))
sys.exit(1)
def __del__(self):
if hasattr(self, 'smtp'):
self.smtp.quit()
def send(self, lines, to_addrs):
try:
if self.username or self.password:
sys.stderr.write("*** Authenticating as %s ***\n" % self.username)
self.smtp.login(self.username, self.password)
msg = ''.join(lines)
# turn comma-separated list into Python list if needed.
if isinstance(to_addrs, basestring):
to_addrs = [email for (name, email) in getaddresses([to_addrs])]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
except Exception, e:
sys.stderr.write('*** Error sending email ***\n')
sys.stderr.write('*** %s\n' % str(e))
self.smtp.quit()
sys.exit(1)
class OutputMailer(Mailer):
"""Write emails to an output stream, bracketed by lines of '=' characters.
This is intended for debugging purposes."""
SEPARATOR = '=' * 75 + '\n'
def __init__(self, f):
self.f = f
def send(self, lines, to_addrs):
self.f.write(self.SEPARATOR)
self.f.writelines(lines)
self.f.write(self.SEPARATOR)
def get_git_dir():
"""Determine GIT_DIR.
Determine GIT_DIR either from the GIT_DIR environment variable or
from the working directory, using Git's usual rules."""
try:
return read_git_output(['rev-parse', '--git-dir'])
except CommandError:
sys.stderr.write('fatal: git_multimail: not in a git directory\n')
sys.exit(1)
class Environment(object):
"""Describes the environment in which the push is occurring.
An Environment object encapsulates information about the local
environment. For example, it knows how to determine:
* the name of the repository to which the push occurred
* what user did the push
* what users want to be informed about various types of changes.
An Environment object is expected to have the following methods:
get_repo_shortname()
Return a short name for the repository, for display
purposes.
get_repo_path()
Return the absolute path to the Git repository.
get_emailprefix()
Return a string that will be prefixed to every email's
subject.
get_pusher()
Return the username of the person who pushed the changes.
This value is used in the email body to indicate who
pushed the change.
get_pusher_email() (may return None)
Return the email address of the person who pushed the
changes. The value should be a single RFC 2822 email
address as a string; e.g., "Joe User <[email protected]>"
if available, otherwise "[email protected]". If set, the
value is used as the Reply-To address for refchange
emails. If it is impossible to determine the pusher's
email, this attribute should be set to None (in which case
no Reply-To header will be output).
get_sender()
Return the address to be used as the 'From' email address
in the email envelope.
get_fromaddr()
Return the 'From' email address used in the email 'From:'
headers. (May be a full RFC 2822 email address like 'Joe
User <[email protected]>'.)
get_administrator()
Return the name and/or email of the repository
administrator. This value is used in the footer as the
person to whom requests to be removed from the
notification list should be sent. Ideally, it should
include a valid email address.
get_reply_to_refchange()
get_reply_to_commit()
Return the address to use in the email "Reply-To" header,
as a string. These can be an RFC 2822 email address, or
None to omit the "Reply-To" header.
get_reply_to_refchange() is used for refchange emails;
get_reply_to_commit() is used for individual commit
emails.
They should also define the following attributes:
announce_show_shortlog (bool)
True iff announce emails should include a shortlog.
refchange_showgraph (bool)
True iff refchange emails should include a detailed graph.
refchange_showlog (bool)
True iff refchange emails should include a detailed log.
diffopts (list of strings)
The options that should be passed to 'git diff' for the
summary email. The value should be a list of strings
representing words to be passed to the command.
graphopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log --graph' when generating the detailed graph for
a set of commits (see refchange_showgraph)
logopts (list of strings)
Analogous to diffopts, but contains options passed to
'git log' when generating the detailed log for a set of
commits (see refchange_showlog)
commitlogopts (list of strings)
The options that should be passed to 'git log' for each
commit mail. The value should be a list of strings
representing words to be passed to the command.
quiet (bool)
On success do not write to stderr
stdout (bool)
Write email to stdout rather than emailing. Useful for debugging
combine_when_single_commit (bool)
True if a combined email should be produced when a single
new commit is pushed to a branch, False otherwise.
"""
REPO_NAME_RE = re.compile(r'^(?P<name>.+?)(?:\.git)$')
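# e.g. a bare repository at /srv/git/project.git (hypothetical path)
# yields the shortname 'project'; a directory named simply 'project'
# is returned unchanged.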
def __init__(self, osenv=None):
self.osenv = osenv or os.environ
self.announce_show_shortlog = False
self.maxcommitemails = 500
self.diffopts = ['--stat', '--summary', '--find-copies-harder']
self.graphopts = ['--oneline', '--decorate']
self.logopts = []
self.refchange_showgraph = False
self.refchange_showlog = False
self.commitlogopts = ['-C', '--stat', '-p', '--cc']
self.quiet = False
self.stdout = False
self.combine_when_single_commit = True
self.COMPUTED_KEYS = [
'administrator',
'charset',
'emailprefix',
'fromaddr',
'pusher',
'pusher_email',
'repo_path',
'repo_shortname',
'sender',
]
self._values = None
def get_repo_shortname(self):
"""Use the last part of the repo path, with ".git" stripped off if present."""
basename = os.path.basename(os.path.abspath(self.get_repo_path()))
m = self.REPO_NAME_RE.match(basename)
if m:
return m.group('name')
else:
return basename
def get_pusher(self):
raise NotImplementedError()
def get_pusher_email(self):
return None
def get_fromaddr(self):
config = Config('user')
fromname = config.get('name', default='')
fromemail = config.get('email', default='')
if fromemail:
return formataddr([fromname, fromemail])
return self.get_sender()
def get_administrator(self):
return 'the administrator of this repository'
def get_emailprefix(self):
return ''
def get_repo_path(self):
if read_git_output(['rev-parse', '--is-bare-repository']) == 'true':
path = get_git_dir()
else:
path = read_git_output(['rev-parse', '--show-toplevel'])
return os.path.abspath(path)
def get_charset(self):
return CHARSET
def get_values(self):
"""Return a dictionary {keyword: expansion} for this Environment.
This method is called by Change._compute_values(). The keys
in the returned dictionary are available to be used in any of
the templates. The dictionary is created by calling
self.get_NAME() for each of the attributes named in
COMPUTED_KEYS and recording those that do not return None.
The return value is always a new dictionary."""
if self._values is None:
values = {}
for key in self.COMPUTED_KEYS:
value = getattr(self, 'get_%s' % (key,))()
if value is not None:
values[key] = value
self._values = values
return self._values.copy()
def get_refchange_recipients(self, refchange):
"""Return the recipients for notifications about refchange.
Return the list of email addresses to which notifications
about the specified ReferenceChange should be sent."""
raise NotImplementedError()
def get_announce_recipients(self, annotated_tag_change):
"""Return the recipients for notifications about annotated_tag_change.
Return the list of email addresses to which notifications
about the specified AnnotatedTagChange should be sent."""
raise NotImplementedError()
def get_reply_to_refchange(self, refchange):
return self.get_pusher_email()
def get_revision_recipients(self, revision):
"""Return the recipients for messages about revision.
Return the list of email addresses to which notifications
about the specified Revision should be sent. This method
could be overridden, for example, to take into account the
contents of the revision when deciding whom to notify about
it. For example, there could be a scheme for users to express
interest in particular files or subdirectories, and only
receive notification emails for revisions affecting those
files."""
raise NotImplementedError()
def get_reply_to_commit(self, revision):
return revision.author
def filter_body(self, lines):
"""Filter the lines intended for an email body.
lines is an iterable over the lines that would go into the
email body. Filter it (e.g., limit the number of lines, the
line length, character set, etc.), returning another iterable.
See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin
for classes implementing this functionality."""
return lines
def log_msg(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_warning(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
def log_error(self, msg):
"""Write the string msg on a log file or on stderr.
Sends the text to stderr by default, override to change the behavior."""
sys.stderr.write(msg)
class ConfigEnvironmentMixin(Environment):
"""A mixin that sets self.config to its constructor's config argument.
This class's constructor consumes the "config" argument.
Mixins that need to inspect the config should inherit from this
class (1) to make sure that "config" is still in the constructor
arguments when its own constructor runs and/or (2) to be sure that
self.config is set after construction."""
def __init__(self, config, **kw):
super(ConfigEnvironmentMixin, self).__init__(**kw)
self.config = config
class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin):
"""An Environment that reads most of its information from "git config"."""
def __init__(self, config, **kw):
super(ConfigOptionsEnvironmentMixin, self).__init__(
config=config, **kw
)
for var, cfg in (
('announce_show_shortlog', 'announceshortlog'),
('refchange_showgraph', 'refchangeShowGraph'),
('refchange_showlog', 'refchangeshowlog'),
('quiet', 'quiet'),
('stdout', 'stdout'),
):
val = config.get_bool(cfg)
if val is not None:
setattr(self, var, val)
maxcommitemails = config.get('maxcommitemails')
if maxcommitemails is not None:
try:
self.maxcommitemails = int(maxcommitemails)
except ValueError:
self.log_warning(
'*** Malformed value for multimailhook.maxCommitEmails: %s\n' % maxcommitemails
+ '*** Expected a number. Ignoring.\n'
)
diffopts = config.get('diffopts')
if diffopts is not None:
self.diffopts = shlex.split(diffopts)
graphopts = config.get('graphOpts')
if graphopts is not None:
self.graphopts = shlex.split(graphopts)
logopts = config.get('logopts')
if logopts is not None:
self.logopts = shlex.split(logopts)
commitlogopts = config.get('commitlogopts')
if commitlogopts is not None:
self.commitlogopts = shlex.split(commitlogopts)
reply_to = config.get('replyTo')
self.__reply_to_refchange = config.get('replyToRefchange', default=reply_to)
if (
self.__reply_to_refchange is not None
and self.__reply_to_refchange.lower() == 'author'
):
raise ConfigurationException(
'"author" is not an allowed setting for replyToRefchange'
)
self.__reply_to_commit = config.get('replyToCommit', default=reply_to)
combine = config.get_bool('combineWhenSingleCommit')
if combine is not None:
self.combine_when_single_commit = combine
def get_administrator(self):
return (
self.config.get('administrator')
or self.get_sender()
or super(ConfigOptionsEnvironmentMixin, self).get_administrator()
)
def get_repo_shortname(self):
return (
self.config.get('reponame')
or super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname()
)
def get_emailprefix(self):
emailprefix = self.config.get('emailprefix')
if emailprefix is not None:
emailprefix = emailprefix.strip()
if emailprefix:
return emailprefix + ' '
else:
return ''
else:
return '[%s] ' % (self.get_repo_shortname(),)
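# Illustrative behavior: setting multimailhook.emailprefix to "[acme]"
# (hypothetical value) makes subjects start with "[acme] "; when unset,
# "[<repo shortname>] " is used instead.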
def get_sender(self):
return self.config.get('envelopesender')
def get_fromaddr(self):
fromaddr = self.config.get('from')
if fromaddr:
return fromaddr
return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr()
def get_reply_to_refchange(self, refchange):
if self.__reply_to_refchange is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_refchange(refchange)
elif self.__reply_to_refchange.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_refchange.lower() == 'none':
return None
else:
return self.__reply_to_refchange
def get_reply_to_commit(self, revision):
if self.__reply_to_commit is None:
return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_commit(revision)
elif self.__reply_to_commit.lower() == 'author':
return revision.author
elif self.__reply_to_commit.lower() == 'pusher':
return self.get_pusher_email()
elif self.__reply_to_commit.lower() == 'none':
return None
else:
return self.__reply_to_commit
def get_scancommitforcc(self):
return self.config.get('scancommitforcc')
class FilterLinesEnvironmentMixin(Environment):
"""Handle encoding and maximum line length of body lines.
emailmaxlinelength (int or None)
The maximum length of any single line in the email body.
Longer lines are truncated at that length with ' [...]'
appended.
strict_utf8 (bool)
If this field is set to True, then the email body text is
expected to be UTF-8. Any invalid characters are
converted to U+FFFD, the Unicode replacement character
(encoded as UTF-8, of course).
"""
def __init__(self, strict_utf8=True, emailmaxlinelength=500, **kw):
super(FilterLinesEnvironmentMixin, self).__init__(**kw)
self.__strict_utf8 = strict_utf8
self.__emailmaxlinelength = emailmaxlinelength
def filter_body(self, lines):
lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines)
if self.__strict_utf8:
lines = (line.decode(ENCODING, 'replace') for line in lines)
# Limit the line length in Unicode-space to avoid
# splitting characters:
if self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
lines = (line.encode(ENCODING, 'replace') for line in lines)
elif self.__emailmaxlinelength:
lines = limit_linelength(lines, self.__emailmaxlinelength)
return lines
class ConfigFilterLinesEnvironmentMixin(
ConfigEnvironmentMixin,
FilterLinesEnvironmentMixin,
):
"""Handle encoding and maximum line length based on config."""
def __init__(self, config, **kw):
strict_utf8 = config.get_bool('emailstrictutf8', default=None)
if strict_utf8 is not None:
kw['strict_utf8'] = strict_utf8
emailmaxlinelength = config.get('emailmaxlinelength')
if emailmaxlinelength is not None:
kw['emailmaxlinelength'] = int(emailmaxlinelength)
super(ConfigFilterLinesEnvironmentMixin, self).__init__(
config=config, **kw
)
class MaxlinesEnvironmentMixin(Environment):
"""Limit the email body to a specified number of lines."""
def __init__(self, emailmaxlines, **kw):
super(MaxlinesEnvironmentMixin, self).__init__(**kw)
self.__emailmaxlines = emailmaxlines
def filter_body(self, lines):
lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines)
if self.__emailmaxlines:
lines = limit_lines(lines, self.__emailmaxlines)
return lines
class ConfigMaxlinesEnvironmentMixin(
ConfigEnvironmentMixin,
MaxlinesEnvironmentMixin,
):
"""Limit the email body to the number of lines specified in config."""
def __init__(self, config, **kw):
emailmaxlines = int(config.get('emailmaxlines', default='0'))
super(ConfigMaxlinesEnvironmentMixin, self).__init__(
config=config,
emailmaxlines=emailmaxlines,
**kw
)
class FQDNEnvironmentMixin(Environment):
"""A mixin that sets the host's FQDN to its constructor argument."""
def __init__(self, fqdn, **kw):
super(FQDNEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['fqdn']
self.__fqdn = fqdn
def get_fqdn(self):
"""Return the fully-qualified domain name for this host.
Return None if it is unavailable or unwanted."""
return self.__fqdn
class ConfigFQDNEnvironmentMixin(
ConfigEnvironmentMixin,
FQDNEnvironmentMixin,
):
"""Read the FQDN from the config."""
def __init__(self, config, **kw):
fqdn = config.get('fqdn')
super(ConfigFQDNEnvironmentMixin, self).__init__(
config=config,
fqdn=fqdn,
**kw
)
class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin):
"""Get the FQDN by calling socket.getfqdn()."""
def __init__(self, **kw):
super(ComputeFQDNEnvironmentMixin, self).__init__(
fqdn=socket.getfqdn(),
**kw
)
class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin):
"""Deduce pusher_email from pusher by appending an emaildomain."""
def __init__(self, **kw):
super(PusherDomainEnvironmentMixin, self).__init__(**kw)
self.__emaildomain = self.config.get('emaildomain')
def get_pusher_email(self):
if self.__emaildomain:
# Derive the pusher's full email address in the default way:
return '%s@%s' % (self.get_pusher(), self.__emaildomain)
else:
return super(PusherDomainEnvironmentMixin, self).get_pusher_email()
class StaticRecipientsEnvironmentMixin(Environment):
"""Set recipients statically based on constructor parameters."""
def __init__(
self,
refchange_recipients, announce_recipients, revision_recipients, scancommitforcc,
**kw
):
super(StaticRecipientsEnvironmentMixin, self).__init__(**kw)
# The recipients for various types of notification emails, as
# RFC 2822 email addresses separated by commas (or the empty
# string if no recipients are configured). Although there is
# a mechanism to choose the recipient lists based on the
# actual *contents* of the change being reported, we only
# choose based on the *type* of the change. Therefore we can
# compute them once and for all:
if not (refchange_recipients
or announce_recipients
or revision_recipients
or scancommitforcc):
raise ConfigurationException('No email recipients configured!')
self.__refchange_recipients = refchange_recipients
self.__announce_recipients = announce_recipients
self.__revision_recipients = revision_recipients
def get_refchange_recipients(self, refchange):
return self.__refchange_recipients
def get_announce_recipients(self, annotated_tag_change):
return self.__announce_recipients
def get_revision_recipients(self, revision):
return self.__revision_recipients
class ConfigRecipientsEnvironmentMixin(
ConfigEnvironmentMixin,
StaticRecipientsEnvironmentMixin
):
"""Determine recipients statically based on config."""
def __init__(self, config, **kw):
super(ConfigRecipientsEnvironmentMixin, self).__init__(
config=config,
refchange_recipients=self._get_recipients(
config, 'refchangelist', 'mailinglist',
),
announce_recipients=self._get_recipients(
config, 'announcelist', 'refchangelist', 'mailinglist',
),
revision_recipients=self._get_recipients(
config, 'commitlist', 'mailinglist',
),
scancommitforcc=config.get('scancommitforcc'),
**kw
)
def _get_recipients(self, config, *names):
"""Return the recipients for a particular type of message.
Return the list of email addresses to which a particular type
of notification email should be sent, by looking at the config
value for "multimailhook.$name" for each of names. Use the
value from the first name that is configured. The return
value is a (possibly empty) string containing RFC 2822 email
addresses separated by commas. If no configuration could be
found, return the empty string."""
for name in names:
retval = config.get_recipients(name)
if retval is not None:
return retval
else:
return ''
class ProjectdescEnvironmentMixin(Environment):
"""Make a "projectdesc" value available for templates.
By default, it is set to the first line of $GIT_DIR/description
(if that file is present and appears to be set meaningfully)."""
def __init__(self, **kw):
super(ProjectdescEnvironmentMixin, self).__init__(**kw)
self.COMPUTED_KEYS += ['projectdesc']
def get_projectdesc(self):
"""Return a one-line descripition of the project."""
git_dir = get_git_dir()
try:
projectdesc = open(os.path.join(git_dir, 'description')).readline().strip()
if projectdesc and not projectdesc.startswith('Unnamed repository'):
return projectdesc
except IOError:
pass
return 'UNNAMED PROJECT'
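# Illustrative behavior: if $GIT_DIR/description contains a
# (hypothetical) line "Acme widget library", the 'projectdesc' template
# value expands to that line; the stock "Unnamed repository..."
# placeholder left by 'git init' is ignored.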
class GenericEnvironmentMixin(Environment):
def get_pusher(self):
return self.osenv.get('USER', self.osenv.get('USERNAME', 'unknown user'))
class GenericEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GenericEnvironmentMixin,
Environment,
):
pass
class GitoliteEnvironmentMixin(Environment):
def get_repo_shortname(self):
# The gitolite environment variable $GL_REPO is a pretty good
# repo_shortname (though it's probably not as good as a value
# the user might have explicitly put in his config).
return (
self.osenv.get('GL_REPO', None)
or super(GitoliteEnvironmentMixin, self).get_repo_shortname()
)
def get_pusher(self):
return self.osenv.get('GL_USER', 'unknown user')
def get_fromaddr(self):
GL_USER = self.osenv.get('GL_USER')
if GL_USER is not None:
# Find the path to gitolite.conf. Note that gitolite v3
# did away with the GL_ADMINDIR and GL_CONF environment
# variables (they are now hard-coded).
GL_ADMINDIR = self.osenv.get(
'GL_ADMINDIR',
os.path.expanduser(os.path.join('~', '.gitolite')))
GL_CONF = self.osenv.get(
'GL_CONF',
os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf'))
if os.path.isfile(GL_CONF):
f = open(GL_CONF, 'rU')
try:
in_user_emails_section = False
re_template = r'^\s*#\s*{}\s*$'
re_begin, re_user, re_end = (
re.compile(re_template.format(x))
for x in (
r'BEGIN\s+USER\s+EMAILS',
re.escape(GL_USER) + r'\s+(.*)',
r'END\s+USER\s+EMAILS',
))
for l in f:
l = l.rstrip('\n')
if not in_user_emails_section:
if re_begin.match(l):
in_user_emails_section = True
continue
if re_end.match(l):
break
m = re_user.match(l)
if m:
return m.group(1)
finally:
f.close()
return super(GitoliteEnvironmentMixin, self).get_fromaddr()
class IncrementalDateTime(object):
"""Simple wrapper to give incremental date/times.
Each call will result in a date/time a second later than the
previous call. This can be used to falsify email headers, to
increase the likelihood that email clients sort the emails
correctly."""
def __init__(self):
self.time = time.time()
def next(self):
formatted = formatdate(self.time, True)
self.time += 1
return formatted
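# Illustrative use: successive calls yield RFC 2822 dates one second
# apart, suitable for the Date: headers of consecutive emails, e.g.
#     send_date = IncrementalDateTime()
#     first = send_date.next()   # e.g. 'Mon, 01 Jan 2001 00:00:00 -0000'
#     second = send_date.next()  # one second later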
class GitoliteEnvironment(
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
ConfigRecipientsEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
GitoliteEnvironmentMixin,
Environment,
):
pass
class Push(object):
"""Represent an entire push (i.e., a group of ReferenceChanges).
It is easy to figure out what commits were added to a *branch* by
a Reference change:
git rev-list change.old..change.new
or removed from a *branch*:
git rev-list change.new..change.old
But it is not quite so trivial to determine which entirely new
commits were added to the *repository* by a push and which old
commits were discarded by a push. A big part of the job of this
class is to figure out these things, and to make sure that new
commits are only detailed once even if they were added to multiple
references.
The first step is to determine the "other" references--those
unaffected by the current push. They are computed by listing all
references then removing any affected by this push. The results
are stored in Push._other_ref_sha1s.
The commits contained in the repository before this push were
git rev-list other1 other2 other3 ... change1.old change2.old ...
Where "changeN.old" is the old value of one of the references
affected by this push.
The commits contained in the repository after this push are
git rev-list other1 other2 other3 ... change1.new change2.new ...
The commits added by this push are the difference between these
two sets, which can be written
git rev-list \
^other1 ^other2 ... \
^change1.old ^change2.old ... \
change1.new change2.new ...
The commits removed by this push can be computed by
git rev-list \
^other1 ^other2 ... \
^change1.new ^change2.new ... \
change1.old change2.old ...
The last point is that it is possible that other pushes are
occurring simultaneously to this one, so reference values can
change at any time. It is impossible to eliminate all race
conditions, but we reduce the window of time during which problems
can occur by translating reference names to SHA1s as soon as
possible and working with SHA1s thereafter (because SHA1s are
immutable)."""
# A map {(changeclass, changetype): integer} specifying the order
# that reference changes will be processed if multiple reference
# changes are included in a single push. The order is significant
# mostly because new commit notifications are threaded together
# with the first reference change that includes the commit. The
# following order thus causes commits to be grouped with branch
# changes (as opposed to tag changes) if possible.
SORT_ORDER = dict(
(value, i) for (i, value) in enumerate([
(BranchChange, 'update'),
(BranchChange, 'create'),
(AnnotatedTagChange, 'update'),
(AnnotatedTagChange, 'create'),
(NonAnnotatedTagChange, 'update'),
(NonAnnotatedTagChange, 'create'),
(BranchChange, 'delete'),
(AnnotatedTagChange, 'delete'),
(NonAnnotatedTagChange, 'delete'),
(OtherReferenceChange, 'update'),
(OtherReferenceChange, 'create'),
(OtherReferenceChange, 'delete'),
])
)
def __init__(self, changes, ignore_other_refs=False):
self.changes = sorted(changes, key=self._sort_key)
self.__other_ref_sha1s = None
self.__cached_commits_spec = {}
if ignore_other_refs:
self.__other_ref_sha1s = set()
@classmethod
def _sort_key(klass, change):
return (klass.SORT_ORDER[change.__class__, change.change_type], change.refname,)
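# Illustrative ordering: a push that updates refs/heads/master and
# deletes refs/tags/old-tag processes the branch update (sort key 0)
# before the tag deletion, so new commits are threaded with the branch
# change email.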
@property
def _other_ref_sha1s(self):
"""The GitObjects referred to by references unaffected by this push.
"""
if self.__other_ref_sha1s is None:
# The refnames being changed by this push:
updated_refs = set(
change.refname
for change in self.changes
)
# The SHA-1s of commits referred to by all references in this
# repository *except* updated_refs:
sha1s = set()
fmt = (
'%(objectname) %(objecttype) %(refname)\n'
'%(*objectname) %(*objecttype) %(refname)'
)
for line in read_git_lines(
['for-each-ref', '--format=%s' % (fmt,)]):
(sha1, type, name) = line.split(' ', 2)
if sha1 and type == 'commit' and name not in updated_refs:
sha1s.add(sha1)
self.__other_ref_sha1s = sha1s
return self.__other_ref_sha1s
def _get_commits_spec_incl(self, new_or_old, reference_change=None):
"""Get new or old SHA-1 from one or each of the changed refs.
Return a list of SHA-1 commit identifier strings suitable as
arguments to 'git rev-list' (or 'git log' or ...). The
returned identifiers are either the old or new values from one
or all of the changed references, depending on the values of
new_or_old and reference_change.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned SHA-1 identifiers are the new values from
each changed reference. If 'old', the SHA-1 identifiers are
the old values from each changed reference.
If reference_change is specified and not None, only the new or
old reference from the specified reference is included in the
return value.
This function returns None if there are no matching revisions
(e.g., because a branch was deleted and new_or_old is 'new').
"""
if not reference_change:
incl_spec = sorted(
getattr(change, new_or_old).sha1
for change in self.changes
if getattr(change, new_or_old)
)
if not incl_spec:
incl_spec = None
elif not getattr(reference_change, new_or_old).commit_sha1:
incl_spec = None
else:
incl_spec = [getattr(reference_change, new_or_old).commit_sha1]
return incl_spec
def _get_commits_spec_excl(self, new_or_old):
"""Get exclusion revisions for determining new or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that will exclude all
commits that, depending on the value of new_or_old, were
either previously in the repository (useful for determining
which commits are new to the repository) or currently in the
repository (useful for determining which commits were
discarded from the repository).
new_or_old is either the string 'new' or the string 'old'. If
'new', the commits to be excluded are those that were in the
repository before the push. If 'old', the commits to be
excluded are those that are currently in the repository. """
old_or_new = {'old': 'new', 'new': 'old'}[new_or_old]
excl_revs = self._other_ref_sha1s.union(
getattr(change, old_or_new).sha1
for change in self.changes
if getattr(change, old_or_new).type in ['commit', 'tag']
)
return ['^' + sha1 for sha1 in sorted(excl_revs)]
def get_commits_spec(self, new_or_old, reference_change=None):
"""Get rev-list arguments for added or discarded commits.
Return a list of strings suitable as arguments to 'git
rev-list' (or 'git log' or ...) that select those commits
that, depending on the value of new_or_old, are either new to
the repository or were discarded from the repository.
new_or_old is either the string 'new' or the string 'old'. If
'new', the returned list is used to select commits that are
new to the repository. If 'old', the returned value is used
to select the commits that have been discarded from the
repository.
If reference_change is specified and not None, the new or
discarded commits are limited to those that are reachable from
the new or old value of the specified reference.
This function returns None if there are no added (or discarded)
revisions.
"""
key = (new_or_old, reference_change)
if key not in self.__cached_commits_spec:
ret = self._get_commits_spec_incl(new_or_old, reference_change)
if ret is not None:
ret.extend(self._get_commits_spec_excl(new_or_old))
self.__cached_commits_spec[key] = ret
return self.__cached_commits_spec[key]
def get_new_commits(self, reference_change=None):
"""Return a list of commits added by this push.
Return a list of the object names of commits that were added
by the part of this push represented by reference_change. If
reference_change is None, then return a list of *all* commits
added by this push."""
spec = self.get_commits_spec('new', reference_change)
return git_rev_list(spec)
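# Illustrative spec (hypothetical SHA-1s): for a fast-forward update of
# a single branch, get_commits_spec('new', change) might return
#     ['<new sha1>', '^<old sha1>', '^<other ref sha1>', ...]
# i.e. the inclusions from _get_commits_spec_incl() followed by the
# exclusions from _get_commits_spec_excl().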
def get_discarded_commits(self, reference_change):
"""Return a list of commits discarded by this push.
Return a list of the object names of commits that were
entirely discarded from the repository by the part of this
push represented by reference_change."""
spec = self.get_commits_spec('old', reference_change)
return git_rev_list(spec)
def send_emails(self, mailer, body_filter=None):
"""Use send all of the notification emails needed for this push.
Use send all of the notification emails (including reference
change emails and commit emails) needed for this push. Send
the emails using mailer. If body_filter is not None, then use
it to filter the lines that are intended for the email
body."""
# The sha1s of commits that were introduced by this push.
# They will be removed from this set as they are processed, to
# guarantee that one (and only one) email is generated for
# each new commit.
unhandled_sha1s = set(self.get_new_commits())
send_date = IncrementalDateTime()
for change in self.changes:
sha1s = []
for sha1 in reversed(list(self.get_new_commits(change))):
if sha1 in unhandled_sha1s:
sha1s.append(sha1)
unhandled_sha1s.remove(sha1)
# Check if we've got anyone to send to
if not change.recipients:
change.environment.log_warning(
'*** no recipients configured so no email will be sent\n'
'*** for %r update %s->%s\n'
% (change.refname, change.old.sha1, change.new.sha1,)
)
else:
if not change.environment.quiet:
change.environment.log_msg(
'Sending notification emails to: %s\n' % (change.recipients,))
extra_values = {'send_date': send_date.next()}
rev = change.send_single_combined_email(sha1s)
if rev:
mailer.send(
change.generate_combined_email(self, rev, body_filter, extra_values),
rev.recipients,
)
# This change is now fully handled; no need to handle
# individual revisions any further.
continue
else:
mailer.send(
change.generate_email(self, body_filter, extra_values),
change.recipients,
)
max_emails = change.environment.maxcommitemails
if max_emails and len(sha1s) > max_emails:
change.environment.log_warning(
'*** Too many new commits (%d), not sending commit emails.\n' % len(sha1s)
+ '*** Try setting multimailhook.maxCommitEmails to a greater value\n'
+ '*** Currently, multimailhook.maxCommitEmails=%d\n' % max_emails
)
return
for (num, sha1) in enumerate(sha1s):
rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s))
if not rev.recipients and rev.cc_recipients:
change.environment.log_msg('*** Replacing Cc: with To:\n')
rev.recipients = rev.cc_recipients
rev.cc_recipients = None
if rev.recipients:
extra_values = {'send_date': send_date.next()}
mailer.send(
rev.generate_email(self, body_filter, extra_values),
rev.recipients,
)
# Consistency check:
if unhandled_sha1s:
change.environment.log_error(
'ERROR: No emails were sent for the following new commits:\n'
' %s\n'
% ('\n '.join(sorted(unhandled_sha1s)),)
)
def run_as_post_receive_hook(environment, mailer):
changes = []
for line in sys.stdin:
(oldrev, newrev, refname) = line.strip().split(' ', 2)
changes.append(
ReferenceChange.create(environment, oldrev, newrev, refname)
)
push = Push(changes)
push.send_emails(mailer, body_filter=environment.filter_body)
def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
changes = [
ReferenceChange.create(
environment,
read_git_output(['rev-parse', '--verify', oldrev]),
read_git_output(['rev-parse', '--verify', newrev]),
refname,
),
]
push = Push(changes, force_send)
push.send_emails(mailer, body_filter=environment.filter_body)
def choose_mailer(config, environment):
mailer = config.get('mailer', default='sendmail')
if mailer == 'smtp':
smtpserver = config.get('smtpserver', default='localhost')
smtpservertimeout = float(config.get('smtpservertimeout', default=10.0))
smtpserverdebuglevel = int(config.get('smtpserverdebuglevel', default=0))
smtpencryption = config.get('smtpencryption', default='none')
smtpuser = config.get('smtpuser', default='')
smtppass = config.get('smtppass', default='')
mailer = SMTPMailer(
envelopesender=(environment.get_sender() or environment.get_fromaddr()),
smtpserver=smtpserver, smtpservertimeout=smtpservertimeout,
smtpserverdebuglevel=smtpserverdebuglevel,
smtpencryption=smtpencryption,
smtpuser=smtpuser,
smtppass=smtppass,
)
elif mailer == 'sendmail':
command = config.get('sendmailcommand')
if command:
command = shlex.split(command)
mailer = SendMailer(command=command, envelopesender=environment.get_sender())
else:
environment.log_error(
'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n' % mailer
+ 'please use one of "smtp" or "sendmail".\n'
)
sys.exit(1)
return mailer
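# Usage sketch (assumption, values are examples only): the keys read above map
# to git configuration entries such as
#
#     git config multimailhook.mailer smtp
#     git config multimailhook.smtpserver mail.example.com
#     git config multimailhook.smtpencryption tls
#
# Any key left unset falls back to the defaults passed to config.get().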
KNOWN_ENVIRONMENTS = {
'generic': GenericEnvironmentMixin,
'gitolite': GitoliteEnvironmentMixin,
}
def choose_environment(config, osenv=None, env=None, recipients=None):
if not osenv:
osenv = os.environ
environment_mixins = [
ProjectdescEnvironmentMixin,
ConfigMaxlinesEnvironmentMixin,
ComputeFQDNEnvironmentMixin,
ConfigFilterLinesEnvironmentMixin,
PusherDomainEnvironmentMixin,
ConfigOptionsEnvironmentMixin,
]
environment_kw = {
'osenv': osenv,
'config': config,
}
if not env:
env = config.get('environment')
if not env:
if 'GL_USER' in osenv and 'GL_REPO' in osenv:
env = 'gitolite'
else:
env = 'generic'
environment_mixins.append(KNOWN_ENVIRONMENTS[env])
if recipients:
environment_mixins.insert(0, StaticRecipientsEnvironmentMixin)
environment_kw['refchange_recipients'] = recipients
environment_kw['announce_recipients'] = recipients
environment_kw['revision_recipients'] = recipients
environment_kw['scancommitforcc'] = config.get('scancommitforcc')
else:
environment_mixins.insert(0, ConfigRecipientsEnvironmentMixin)
environment_klass = type(
'EffectiveEnvironment',
tuple(environment_mixins) + (Environment,),
{},
)
return environment_klass(**environment_kw)
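# Note (illustrative, not from the original source): the type() call above
# composes the environment class dynamically; with mixins M1 and M2 it is
# equivalent to writing
#
#     class EffectiveEnvironment(M1, M2, Environment):
#         pass
#
# so each mixin can override Environment hooks and cooperate via super().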
def main(args):
parser = optparse.OptionParser(
description=__doc__,
usage='%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV',
)
parser.add_option(
'--environment', '--env', action='store', type='choice',
choices=['generic', 'gitolite'], default=None,
help=(
            'Choose the type of environment in use. Default is taken from '
'multimailhook.environment if set; otherwise "generic".'
),
)
parser.add_option(
'--stdout', action='store_true', default=False,
help='Output emails to stdout rather than sending them.',
)
parser.add_option(
'--recipients', action='store', default=None,
help='Set list of email recipients for all types of emails.',
)
parser.add_option(
'--show-env', action='store_true', default=False,
help=(
'Write to stderr the values determined for the environment '
'(intended for debugging purposes).'
),
)
parser.add_option(
'--force-send', action='store_true', default=False,
help=(
'Force sending refchange email when using as an update hook. '
'This is useful to work around the unreliable new commits '
'detection in this mode.'
),
)
(options, args) = parser.parse_args(args)
config = Config('multimailhook')
try:
environment = choose_environment(
config, osenv=os.environ,
env=options.environment,
recipients=options.recipients,
)
if options.show_env:
sys.stderr.write('Environment values:\n')
for (k, v) in sorted(environment.get_values().items()):
sys.stderr.write(' %s : %r\n' % (k, v))
sys.stderr.write('\n')
if options.stdout or environment.stdout:
mailer = OutputMailer(sys.stdout)
else:
mailer = choose_mailer(config, environment)
# Dual mode: if arguments were specified on the command line, run
# like an update hook; otherwise, run as a post-receive hook.
if args:
if len(args) != 3:
parser.error('Need zero or three non-option arguments')
(refname, oldrev, newrev) = args
run_as_update_hook(environment, mailer, refname, oldrev, newrev, options.force_send)
else:
run_as_post_receive_hook(environment, mailer)
except ConfigurationException, e:
sys.exit(str(e))
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 | -241,405,530,390,534,080 | 34.677461 | 99 | 0.588625 | false |
mancoast/CPythonPyc_test | fail/340_test_fcntl.py | 96 | 5220 | """Test program for the fcntl C module.
"""
import platform
import os
import struct
import sys
import unittest
from test.support import (verbose, TESTFN, unlink, run_unittest, import_module,
cpython_only)
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
# TODO - Write tests for lockf(); flock() is covered by TestFcntl below.
def get_lockdata():
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd', 'bsdos'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform.startswith('gnukfreebsd'):
lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print('struct.pack: ', repr(lockdata))
return lockdata
lockdata = get_lockdata()
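# Illustrative note (assumption): the bytes packed above lay out a platform-
# specific ``struct flock`` record with l_type=F_WRLCK and the whence/start/
# len fields zeroed; the tests below hand it straight to
#
#     fcntl.fcntl(fd, fcntl.F_SETLKW, lockdata)
#
# to request (and wait for) a write lock on the whole file.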
class BadFile:
def __init__(self, fn):
self.fn = fn
def fileno(self):
return self.fn
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def test_fcntl_fileno(self):
# the example from the library docs
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_bad_file(self):
with self.assertRaises(ValueError):
fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
@cpython_only
def test_fcntl_bad_file_overflow(self):
from _testcapi import INT_MAX, INT_MIN
# Issue 15989
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(OverflowError):
fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK)
@unittest.skipIf(
platform.machine().startswith('arm') and platform.system() == 'Linux',
"ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT")
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
fcntl.fcntl(fd, cmd, flags)
finally:
os.close(fd)
def test_flock(self):
# Solaris needs readable file for shared lock
self.f = open(TESTFN, 'wb+')
fileno = self.f.fileno()
fcntl.flock(fileno, fcntl.LOCK_SH)
fcntl.flock(fileno, fcntl.LOCK_UN)
fcntl.flock(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB)
fcntl.flock(self.f, fcntl.LOCK_UN)
fcntl.flock(fileno, fcntl.LOCK_EX)
fcntl.flock(fileno, fcntl.LOCK_UN)
self.assertRaises(ValueError, fcntl.flock, -1, fcntl.LOCK_SH)
self.assertRaises(TypeError, fcntl.flock, 'spam', fcntl.LOCK_SH)
@cpython_only
def test_flock_overflow(self):
import _testcapi
self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1,
fcntl.LOCK_SH)
def test_main():
run_unittest(TestFcntl)
if __name__ == '__main__':
test_main()
| gpl-3.0 | 8,376,566,355,159,416,000 | 33.342105 | 81 | 0.587356 | false |
rwaldron/mirovideoconverter3 | mvc/widgets/osx/control.py | 2 | 17828 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
""".control - Controls."""
from AppKit import *
from Foundation import *
from objc import YES, NO, nil
from mvc.widgets import widgetconst
import wrappermap
from .base import Widget
from .helpers import NotificationForwarder
class SizedControl(Widget):
def set_size(self, size):
if size == widgetconst.SIZE_NORMAL:
self.view.cell().setControlSize_(NSRegularControlSize)
font = NSFont.systemFontOfSize_(NSFont.systemFontSize())
self.font_size = NSFont.systemFontSize()
elif size == widgetconst.SIZE_SMALL:
font = NSFont.systemFontOfSize_(NSFont.smallSystemFontSize())
self.view.cell().setControlSize_(NSSmallControlSize)
self.font_size = NSFont.smallSystemFontSize()
else:
self.view.cell().setControlSize_(NSRegularControlSize)
font = NSFont.systemFontOfSize_(NSFont.systemFontSize() * size)
self.font_size = NSFont.systemFontSize() * size
self.view.setFont_(font)
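# Usage sketch (assumption, illustrative only): subclasses pick the Cocoa
# control size and the matching system font in one call, e.g.
#
#     checkbox = Checkbox("Remember me")
#     checkbox.set_size(widgetconst.SIZE_SMALL)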
class BaseTextEntry(SizedControl):
"""See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class."""
def __init__(self, initial_text=None):
SizedControl.__init__(self)
self.view = self.make_view()
self.font = NSFont.systemFontOfSize_(NSFont.systemFontSize())
self.view.setFont_(self.font)
self.view.setEditable_(YES)
self.view.cell().setScrollable_(YES)
self.view.cell().setLineBreakMode_(NSLineBreakByClipping)
self.sizer_cell = self.view.cell().copy()
if initial_text:
self.view.setStringValue_(initial_text)
self.set_width(len(initial_text))
else:
self.set_width(10)
self.notifications = NotificationForwarder.create(self.view)
self.create_signal('activate')
self.create_signal('changed')
        self.create_signal('validate')
        # on_end_editing() emits 'focus-out', so that signal must be created
        # here as well (MultilineTextEntry already does the same).
        self.create_signal('focus-out')
def focus(self):
if self.view.window() is not None:
self.view.window().makeFirstResponder_(self.view)
def start_editing(self, initial_text):
self.set_text(initial_text)
self.focus()
# unselect the text and locate the cursor at the end of the entry
text_field = self.view.window().fieldEditor_forObject_(YES, self.view)
text_field.setSelectedRange_(NSMakeRange(len(self.get_text()), 0))
def viewport_created(self):
SizedControl.viewport_created(self)
self.notifications.connect(self.on_changed, 'NSControlTextDidChangeNotification')
self.notifications.connect(self.on_end_editing,
'NSControlTextDidEndEditingNotification')
def remove_viewport(self):
SizedControl.remove_viewport(self)
self.notifications.disconnect()
def baseline(self):
return -self.view.font().descender() + 2
def on_changed(self, notification):
self.emit('changed')
def on_end_editing(self, notification):
self.emit('focus-out')
def calc_size_request(self):
size = self.sizer_cell.cellSize()
return size.width, size.height
def set_text(self, text):
self.view.setStringValue_(text)
self.emit('changed')
def get_text(self):
return self.view.stringValue()
def set_width(self, chars):
self.sizer_cell.setStringValue_('X' * chars)
self.invalidate_size_request()
def set_activates_default(self, setting):
pass
def enable(self):
SizedControl.enable(self)
self.view.setEnabled_(True)
def disable(self):
SizedControl.disable(self)
self.view.setEnabled_(False)
class MiroTextField(NSTextField):
def textDidEndEditing_(self, notification):
wrappermap.wrapper(self).emit('activate')
return NSTextField.textDidEndEditing_(self, notification)
class TextEntry(BaseTextEntry):
def make_view(self):
return MiroTextField.alloc().init()
class NumberEntry(BaseTextEntry):
def make_view(self):
return MiroTextField.alloc().init()
def set_max_length(self, length):
# TODO
pass
def _filter_value(self):
"""Discard any non-numeric characters"""
digits = ''.join(x for x in self.view.stringValue() if x.isdigit())
self.view.setStringValue_(digits)
def on_changed(self, notification):
# overriding on_changed rather than connecting to it ensures that we
# filter the value before anything else connected to the signal sees it
self._filter_value()
BaseTextEntry.on_changed(self, notification)
def get_text(self):
        # handle get_text() calls that arrive after text is entered but
        # before on_changed has filtered it, in case that's possible
self._filter_value()
return BaseTextEntry.get_text(self)
class MiroSecureTextField(NSSecureTextField):
def textDidEndEditing_(self, notification):
wrappermap.wrapper(self).emit('activate')
return NSSecureTextField.textDidEndEditing_(self, notification)
class SecureTextEntry(BaseTextEntry):
def make_view(self):
return MiroSecureTextField.alloc().init()
class MultilineTextEntry(Widget):
def __init__(self, initial_text=None):
Widget.__init__(self)
if initial_text is None:
initial_text = ""
self.view = NSTextView.alloc().initWithFrame_(NSRect((0,0),(50,50)))
self.view.setMaxSize_((1.0e7, 1.0e7))
self.view.setHorizontallyResizable_(NO)
self.view.setVerticallyResizable_(YES)
self.notifications = NotificationForwarder.create(self.view)
self.create_signal('changed')
self.create_signal('focus-out')
        # initial_text was normalized to "" above, so it is never None here.
        self.set_text(initial_text)
self.set_size(widgetconst.SIZE_NORMAL)
    def set_size(self, size):
        if size == widgetconst.SIZE_NORMAL:
            font = NSFont.systemFontOfSize_(NSFont.systemFontSize())
        elif size == widgetconst.SIZE_SMALL:
            # NSTextView is not an NSControl, so there is no cell() to
            # resize; just use the small system font instead.
            font = NSFont.systemFontOfSize_(NSFont.smallSystemFontSize())
        else:
            raise ValueError("Unknown size: %s" % size)
        self.view.setFont_(font)
def viewport_created(self):
Widget.viewport_created(self)
self.notifications.connect(self.on_changed, 'NSTextDidChangeNotification')
self.notifications.connect(self.on_end_editing,
'NSControlTextDidEndEditingNotification')
self.invalidate_size_request()
def remove_viewport(self):
Widget.remove_viewport(self)
self.notifications.disconnect()
def focus(self):
if self.view.window() is not None:
self.view.window().makeFirstResponder_(self.view)
def set_text(self, text):
self.view.setString_(text)
self.invalidate_size_request()
def get_text(self):
return self.view.string()
def on_changed(self, notification):
self.invalidate_size_request()
self.emit("changed")
def on_end_editing(self, notification):
self.emit("focus-out")
def calc_size_request(self):
layout_manager = self.view.layoutManager()
text_container = self.view.textContainer()
# The next line is there just to force cocoa to layout the text
layout_manager.glyphRangeForTextContainer_(text_container)
rect = layout_manager.usedRectForTextContainer_(text_container)
return rect.size.width, rect.size.height
def set_editable(self, editable):
if editable:
self.view.setEditable_(YES)
else:
self.view.setEditable_(NO)
class MiroButton(NSButton):
def initWithSignal_(self, signal):
self = super(MiroButton, self).init()
self.signal = signal
return self
def sendAction_to_(self, action, to):
# We override the Cocoa machinery here and just send it to our wrapper
# widget.
wrappermap.wrapper(self).emit(self.signal)
return YES
class Checkbox(SizedControl):
"""See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class."""
def __init__(self, text="", bold=False, color=None):
SizedControl.__init__(self)
self.create_signal('toggled')
self.view = MiroButton.alloc().initWithSignal_('toggled')
self.view.setButtonType_(NSSwitchButton)
self.bold = bold
self.title = text
self.font_size = NSFont.systemFontSize()
self.color = self.make_color(color)
self._set_title()
def set_size(self, size):
SizedControl.set_size(self, size)
self._set_title()
def _set_title(self):
if self.color is None:
self.view.setTitle_(self.title)
else:
attributes = {
NSForegroundColorAttributeName: self.color,
NSFontAttributeName: NSFont.systemFontOfSize_(self.font_size)
}
string = NSAttributedString.alloc().initWithString_attributes_(
self.title, attributes)
self.view.setAttributedTitle_(string)
def calc_size_request(self):
if self.manual_size_request:
width, height = self.manual_size_request
if width == -1:
width = 10000
if height == -1:
height = 10000
size = self.view.cell().cellSizeForBounds_(
NSRect((0, 0), (width, height)))
else:
size = self.view.cell().cellSize()
return (size.width, size.height)
def baseline(self):
return -self.view.font().descender() + 1
def get_checked(self):
return self.view.state() == NSOnState
def set_checked(self, value):
if value:
self.view.setState_(NSOnState)
else:
self.view.setState_(NSOffState)
def enable(self):
SizedControl.enable(self)
self.view.setEnabled_(True)
def disable(self):
SizedControl.disable(self)
self.view.setEnabled_(False)
def get_text_padding(self):
"""
Returns the amount of space the checkbox takes up before the label.
"""
# XXX FIXME
return 18
class Button(SizedControl):
"""See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class."""
def __init__(self, label, style='normal', width=0):
SizedControl.__init__(self)
self.color = None
self.title = label
self.create_signal('clicked')
self.view = MiroButton.alloc().initWithSignal_('clicked')
self.view.setButtonType_(NSMomentaryPushInButton)
self._set_title()
self.setup_style(style)
self.min_width = width
def set_text(self, label):
self.title = label
self._set_title()
def set_color(self, color):
self.color = self.make_color(color)
self._set_title()
def _set_title(self):
if self.color is None:
self.view.setTitle_(self.title)
else:
attributes = {
NSForegroundColorAttributeName: self.color,
NSFontAttributeName: self.view.font()
}
string = NSAttributedString.alloc().initWithString_attributes_(
self.title, attributes)
self.view.setAttributedTitle_(string)
def setup_style(self, style):
if style == 'normal':
self.view.setBezelStyle_(NSRoundedBezelStyle)
self.pad_height = 0
self.pad_width = 10
self.min_width = 112
elif style == 'smooth':
self.view.setBezelStyle_(NSRoundRectBezelStyle)
self.pad_width = 0
self.pad_height = 4
self.paragraph_style = NSMutableParagraphStyle.alloc().init()
self.paragraph_style.setAlignment_(NSCenterTextAlignment)
def make_default(self):
self.view.setKeyEquivalent_("\r")
def calc_size_request(self):
size = self.view.cell().cellSize()
width = max(self.min_width, size.width + self.pad_width)
height = size.height + self.pad_height
return width, height
def baseline(self):
return -self.view.font().descender() + 10 + self.pad_height
def enable(self):
SizedControl.enable(self)
self.view.setEnabled_(True)
def disable(self):
SizedControl.disable(self)
self.view.setEnabled_(False)
class MiroPopupButton(NSPopUpButton):
def init(self):
self = super(MiroPopupButton, self).init()
self.setTarget_(self)
self.setAction_('handleChange:')
return self
def handleChange_(self, sender):
wrappermap.wrapper(self).emit('changed', self.indexOfSelectedItem())
class OptionMenu(SizedControl):
def __init__(self, options):
SizedControl.__init__(self)
self.create_signal('changed')
self.view = MiroPopupButton.alloc().init()
self.options = options
for option, value in options:
self.view.addItemWithTitle_(option)
def baseline(self):
if self.view.cell().controlSize() == NSRegularControlSize:
return -self.view.font().descender() + 6
else:
return -self.view.font().descender() + 5
def calc_size_request(self):
return self.view.cell().cellSize()
def set_selected(self, index):
self.view.selectItemAtIndex_(index)
def get_selected(self):
return self.view.indexOfSelectedItem()
def enable(self):
SizedControl.enable(self)
self.view.setEnabled_(True)
def disable(self):
SizedControl.disable(self)
self.view.setEnabled_(False)
def set_width(self, width):
# TODO
pass
class RadioButtonGroup:
def __init__(self):
self._buttons = []
def handle_click(self, widget):
self.set_selected(widget)
def add_button(self, button):
self._buttons.append(button)
button.connect('clicked', self.handle_click)
if len(self._buttons) == 1:
button.view.setState_(NSOnState)
else:
button.view.setState_(NSOffState)
def get_buttons(self):
return self._buttons
def get_selected(self):
for mem in self._buttons:
if mem.get_selected():
return mem
def set_selected(self, button):
for mem in self._buttons:
if button is mem:
mem.view.setState_(NSOnState)
else:
mem.view.setState_(NSOffState)
class RadioButton(SizedControl):
def __init__(self, label, group=None, bold=False, color=None):
SizedControl.__init__(self)
self.create_signal('clicked')
self.view = MiroButton.alloc().initWithSignal_('clicked')
self.view.setButtonType_(NSRadioButton)
self.color = self.make_color(color)
self.title = label
self.bold = bold
self.font_size = NSFont.systemFontSize()
self._set_title()
if group is not None:
self.group = group
else:
self.group = RadioButtonGroup()
self.group.add_button(self)
def set_size(self, size):
SizedControl.set_size(self, size)
self._set_title()
def _set_title(self):
if self.color is None:
self.view.setTitle_(self.title)
else:
attributes = {
NSForegroundColorAttributeName: self.color,
NSFontAttributeName: NSFont.systemFontOfSize_(self.font_size)
}
string = NSAttributedString.alloc().initWithString_attributes_(
self.title, attributes)
self.view.setAttributedTitle_(string)
def calc_size_request(self):
size = self.view.cell().cellSize()
return (size.width, size.height)
def baseline(self):
        return -self.view.font().descender() + 2
def get_group(self):
return self.group
def get_selected(self):
return self.view.state() == NSOnState
def set_selected(self):
self.group.set_selected(self)
def enable(self):
SizedControl.enable(self)
self.view.setEnabled_(True)
def disable(self):
SizedControl.disable(self)
self.view.setEnabled_(False)
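# Usage sketch (assumption, illustrative only): buttons that share a group
# stay mutually exclusive, e.g.
#
#     group = RadioButtonGroup()
#     mp3 = RadioButton("MP3", group=group)
#     ogg = RadioButton("Ogg Vorbis", group=group)
#     group.get_selected()  # -> mp3, until another button is clicked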
| gpl-3.0 | -2,966,563,869,931,945,000 | 32.637736 | 119 | 0.632488 | false |
coldmind/django | django/core/mail/backends/filebased.py | 558 | 2771 | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import \
EmailBackend as ConsoleEmailBackend
from django.utils import six
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, **kwargs):
self._fname = None
if 'file_path' in kwargs:
self.file_path = kwargs.pop('file_path')
else:
self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, six.string_types):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
        # Make sure that self.file_path is a directory, if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured(
'Path for saving email messages exists, but is not a directory: %s' % self.file_path
)
        # Try to create it if it does not exist.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError as err:
raise ImproperlyConfigured(
'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super(EmailBackend, self).__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b'\n')
self.stream.write(b'-' * 79)
self.stream.write(b'\n')
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'ab')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
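# Usage sketch: this backend is normally selected through settings, e.g.
#
#     EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#     EMAIL_FILE_PATH = '/tmp/app-messages'  # writable directory
#
# after which django.core.mail.send_mail(...) appends each message to a
# timestamped .log file in that directory instead of sending it.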
| bsd-3-clause | 2,053,251,104,423,867,400 | 38.585714 | 107 | 0.59834 | false |
GladeRom/android_external_chromium_org | tools/json_schema_compiler/features_h_generator.py | 94 | 2686 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
from code import Code
import cpp_util
class HGenerator(object):
def Generate(self, features, source_file, namespace):
return _Generator(features, source_file, namespace).Generate()
class _Generator(object):
"""A .cc generator for features.
"""
def __init__(self, features, source_file, namespace):
self._feature_defs = features
self._source_file = source_file
self._source_file_filename, _ = os.path.splitext(source_file)
self._class_name = cpp_util.ClassName(self._source_file_filename)
self._namespace = namespace
def Generate(self):
"""Generates a Code object for features.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FEATURE_MESSAGE % self._source_file)
.Append()
)
# Hack: for the purpose of gyp the header file will always be the source
# file with its file extension replaced by '.h'. Assume so.
output_file = os.path.splitext(self._namespace.source_file)[0] + '.h'
ifndef_name = cpp_util.GenerateIfndefName(output_file)
(c.Append('#ifndef %s' % ifndef_name)
.Append('#define %s' % ifndef_name)
.Append()
)
(c.Append('#include <map>')
.Append('#include <string>')
.Append()
.Concat(cpp_util.OpenNamespace(self._namespace))
.Append()
)
(c.Append('class %s {' % self._class_name)
.Append(' public:')
.Sblock()
.Concat(self._GeneratePublicBody())
.Eblock()
.Append(' private:')
.Sblock()
.Concat(self._GeneratePrivateBody())
.Eblock('};')
.Append()
.Cblock(cpp_util.CloseNamespace(self._namespace))
)
(c.Append('#endif // %s' % ifndef_name)
.Append()
)
return c
def _GeneratePublicBody(self):
c = Code()
(c.Append('%s();' % self._class_name)
.Append()
.Append('enum ID {')
.Concat(self._GenerateEnumConstants())
.Eblock('};')
.Append()
.Append('const char* ToString(ID id) const;')
.Append('ID FromString(const std::string& id) const;')
.Append()
)
return c
def _GeneratePrivateBody(self):
return Code().Append('std::map<std::string, '
'%s::ID> features_;' % self._class_name)
def _GenerateEnumConstants(self):
c = Code()
(c.Sblock()
.Append('kUnknown,')
)
for feature in self._feature_defs:
c.Append('%s,' % cpp_util.ConstantName(feature.name))
c.Append('kEnumBoundary')
return c
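# Illustrative sketch (assumption about the constant-name mangling) of the
# header this generator emits for a class named APIFeatures with features
# "foo" and "bar":
#
#     class APIFeatures {
#      public:
#       APIFeatures();
#       enum ID {
#         kUnknown,
#         kFoo,
#         kBar,
#         kEnumBoundary
#       };
#       const char* ToString(ID id) const;
#       ID FromString(const std::string& id) const;
#      private:
#       std::map<std::string, APIFeatures::ID> features_;
#     };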
| bsd-3-clause | -2,804,768,821,261,686,000 | 26.131313 | 76 | 0.610573 | false |
pettarin/aeneas | aeneas/audiofilemfcc.py | 5 | 23930 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.audiofilemfcc.AudioFileMFCC`,
representing a mono WAVE audio file as a matrix of
  Mel-frequency cepstral coefficients (MFCC).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from aeneas.audiofile import AudioFile
from aeneas.exacttiming import TimeInterval
from aeneas.exacttiming import TimeValue
from aeneas.logger import Loggable
from aeneas.mfcc import MFCC
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.vad import VAD
import aeneas.globalfunctions as gf
class AudioFileMFCC(Loggable):
"""
    A monaural (single-channel) WAVE audio file,
    represented as a NumPy 2D matrix of
    Mel-frequency cepstral coefficients (MFCC).
The matrix is "fat", that is,
its number of rows is equal to the number of MFCC coefficients
and its number of columns is equal to the number of window shifts
in the audio file.
The number of MFCC coefficients and the MFCC window shift can
be modified via the
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_SIZE`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_SHIFT`
keys in the ``rconf`` object.
If ``mfcc_matrix`` is not ``None``,
it will be used as the MFCC matrix.
If ``file_path`` or ``audio_file`` is not ``None``,
the MFCCs will be computed upon creation of the object,
possibly converting to PCM16 Mono WAVE and/or
loading audio data in memory.
The MFCCs for the entire wave
are divided into three
contiguous intervals (possibly, zero-length)::
HEAD = [:middle_begin[
MIDDLE = [middle_begin:middle_end[
TAIL = [middle_end:[
The usual NumPy convention of including the left/start index
and excluding the right/end index is adopted.
For alignment purposes, only the ``MIDDLE`` portion of the wave
is taken into account; the ``HEAD`` and ``TAIL`` intervals are ignored.
This class heavily uses NumPy views and in-place operations
to avoid creating temporary data or copying data around.
:param string file_path: the path of the PCM16 mono WAVE file, or ``None``
:param tuple file_format: the format of the audio file, if known in advance: ``(codec, channels, rate)`` or ``None``
:param mfcc_matrix: the MFCC matrix to be set, or ``None``
:type mfcc_matrix: :class:`numpy.ndarray`
:param audio_file: an audio file, or ``None``
:type audio_file: :class:`~aeneas.audiofile.AudioFile`
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
:raises: ValueError: if ``file_path``, ``audio_file``, and ``mfcc_matrix`` are all ``None``
.. versionadded:: 1.5.0
"""
TAG = u"AudioFileMFCC"
def __init__(
self,
file_path=None,
file_format=None,
mfcc_matrix=None,
audio_file=None,
rconf=None,
logger=None
):
if (file_path is None) and (audio_file is None) and (mfcc_matrix is None):
raise ValueError(u"You must initialize with at least one of: file_path, audio_file, or mfcc_matrix")
super(AudioFileMFCC, self).__init__(rconf=rconf, logger=logger)
self.file_path = file_path
self.audio_file = audio_file
self.is_reversed = False
self.__mfcc = None
self.__mfcc_mask = None
self.__mfcc_mask_map = None
self.__speech_intervals = None
self.__nonspeech_intervals = None
self.log(u"Initializing MFCCs...")
if mfcc_matrix is not None:
self.__mfcc = mfcc_matrix
self.audio_length = self.all_length * self.rconf.mws
elif (self.file_path is not None) or (self.audio_file is not None):
audio_file_was_none = False
if self.audio_file is None:
audio_file_was_none = True
self.audio_file = AudioFile(
file_path=self.file_path,
file_format=file_format,
rconf=self.rconf,
logger=self.logger
)
# NOTE load audio samples into memory, if not present already
self.audio_file.audio_samples
gf.run_c_extension_with_fallback(
self.log,
"cmfcc",
self._compute_mfcc_c_extension,
self._compute_mfcc_pure_python,
(),
rconf=self.rconf
)
self.audio_length = self.audio_file.audio_length
if audio_file_was_none:
self.log(u"Clearing the audio data...")
self.audio_file.clear_data()
self.audio_file = None
self.log(u"Clearing the audio data... done")
self.__middle_begin = 0
self.__middle_end = self.__mfcc.shape[1]
self.log(u"Initializing MFCCs... done")
def __unicode__(self):
msg = [
u"File path: %s" % self.file_path,
u"Audio length (s): %s" % gf.safe_float(self.audio_length),
]
return u"\n".join(msg)
def __str__(self):
return gf.safe_str(self.__unicode__())
@property
def all_mfcc(self):
"""
The MFCCs of the entire audio file,
that is, HEAD + MIDDLE + TAIL.
:rtype: :class:`numpy.ndarray` (2D)
"""
return self.__mfcc
@property
def all_length(self):
"""
The length, in MFCC coefficients,
of the entire audio file,
that is, HEAD + MIDDLE + TAIL.
:rtype: int
"""
return self.__mfcc.shape[1]
@property
def middle_mfcc(self):
"""
The MFCCs of the middle part of the audio file,
that is, without HEAD and TAIL.
:rtype: :class:`numpy.ndarray` (2D)
"""
return self.__mfcc[:, self.__middle_begin:self.__middle_end]
@property
def middle_length(self):
"""
The length, in MFCC coefficients,
of the middle part of the audio file,
that is, without HEAD and TAIL.
:rtype: int
"""
return self.__middle_end - self.__middle_begin
@property
def middle_map(self):
"""
Return the map
from the MFCC frame indices
in the MIDDLE portion of the wave
to the MFCC FULL frame indices,
that is, an ``numpy.arange(self.middle_begin, self.middle_end)``.
NOTE: to translate indices of MIDDLE,
instead of using fancy indexing with the
result of this function, you might want to simply
add ``self.head_length``.
This function is provided mostly for consistency
with the MASKED case.
:rtype: :class:`numpy.ndarray` (1D)
"""
return numpy.arange(self.__middle_begin, self.__middle_end)
@property
def head_length(self):
"""
The length, in MFCC coefficients,
of the HEAD of the audio file.
:rtype: int
"""
return self.__middle_begin
@property
def tail_length(self):
"""
The length, in MFCC coefficients,
of the TAIL of the audio file.
:rtype: int
"""
return self.all_length - self.__middle_end
@property
def tail_begin(self):
"""
The index, in MFCC coefficients,
where the TAIL of the audio file starts.
:rtype: int
"""
return self.__middle_end
@property
def audio_length(self):
"""
The length, in seconds, of the audio file.
This value is the actual length of the audio file,
computed as ``number of samples / sample_rate``,
hence it might differ than ``len(self.__mfcc) * mfcc_window_shift``.
:rtype: :class:`~aeneas.exacttiming.TimeValue`
"""
return self.__audio_length
@audio_length.setter
def audio_length(self, audio_length):
self.__audio_length = audio_length
@property
def is_reversed(self):
"""
Return ``True`` if currently reversed.
:rtype: bool
"""
return self.__is_reversed
@is_reversed.setter
def is_reversed(self, is_reversed):
self.__is_reversed = is_reversed
@property
def masked_mfcc(self):
"""
Return the MFCC speech frames
in the FULL wave.
:rtype: :class:`numpy.ndarray` (2D)
"""
self._ensure_mfcc_mask()
return self.__mfcc[:, self.__mfcc_mask]
@property
def masked_length(self):
"""
Return the number of MFCC speech frames
in the FULL wave.
:rtype: int
"""
self._ensure_mfcc_mask()
return len(self.__mfcc_mask_map)
@property
def masked_map(self):
"""
Return the map
from the MFCC speech frame indices
to the MFCC FULL frame indices.
:rtype: :class:`numpy.ndarray` (1D)
"""
self._ensure_mfcc_mask()
return self.__mfcc_mask_map
@property
def masked_middle_mfcc(self):
"""
Return the MFCC speech frames
in the MIDDLE portion of the wave.
:rtype: :class:`numpy.ndarray` (2D)
"""
begin, end = self._masked_middle_begin_end()
return (self.masked_mfcc)[:, begin:end]
@property
def masked_middle_length(self):
"""
Return the number of MFCC speech frames
in the MIDDLE portion of the wave.
:rtype: int
"""
begin, end = self._masked_middle_begin_end()
return end - begin
@property
def masked_middle_map(self):
"""
Return the map
from the MFCC speech frame indices
in the MIDDLE portion of the wave
to the MFCC FULL frame indices.
:rtype: :class:`numpy.ndarray` (1D)
"""
begin, end = self._masked_middle_begin_end()
return self.__mfcc_mask_map[begin:end]
def _masked_middle_begin_end(self):
"""
Return the begin and end indices w.r.t. ``self.__mfcc_mask_map``,
corresponding to indices in the MIDDLE portion of the wave,
that is, which fall between ``self.__middle_begin`` and
``self.__middle_end`` in ``self.__mfcc``.
:rtype: (int, int)
"""
self._ensure_mfcc_mask()
begin = numpy.searchsorted(self.__mfcc_mask_map, self.__middle_begin, side="left")
end = numpy.searchsorted(self.__mfcc_mask_map, self.__middle_end, side="right")
return (begin, end)
def intervals(self, speech=True, time=True):
"""
Return a list of intervals::
[(b_1, e_1), (b_2, e_2), ..., (b_k, e_k)]
where ``b_i`` is the time when the ``i``-th interval begins,
and ``e_i`` is the time when it ends.
:param bool speech: if ``True``, return speech intervals,
otherwise return nonspeech intervals
:param bool time: if ``True``, return :class:`~aeneas.exacttiming.TimeInterval` objects,
otherwise return indices (int)
:rtype: list of pairs (see above)
"""
self._ensure_mfcc_mask()
if speech:
self.log(u"Converting speech runs to intervals...")
intervals = self.__speech_intervals
else:
self.log(u"Converting nonspeech runs to intervals...")
intervals = self.__nonspeech_intervals
if time:
mws = self.rconf.mws
intervals = [TimeInterval(
begin=(b * mws),
end=((e + 1) * mws)
) for b, e in intervals]
self.log(u"Converting... done")
return intervals
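    # Usage sketch (assumption, illustrative only):
    #
    #     audio = AudioFileMFCC("audio.wav")
    #     audio.run_vad()
    #     for interval in audio.intervals(speech=True, time=True):
    #         print(interval)  # TimeInterval objects, in seconds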
def inside_nonspeech(self, index):
"""
If ``index`` is contained in a nonspeech interval,
return a pair ``(interval_begin, interval_end)``
such that ``interval_begin <= index < interval_end``,
i.e., ``interval_end`` is assumed not to be included.
Otherwise, return ``None``.
:rtype: ``None`` or tuple
"""
self._ensure_mfcc_mask()
if (index < 0) or (index >= self.all_length) or (self.__mfcc_mask[index]):
return None
return self._binary_search_intervals(self.__nonspeech_intervals, index)
@classmethod
def _binary_search_intervals(cls, intervals, index):
"""
Binary search for the interval containing index,
assuming there is such an interval.
This function should never return ``None``.
"""
start = 0
end = len(intervals) - 1
while start <= end:
middle_index = start + ((end - start) // 2)
middle = intervals[middle_index]
if (middle[0] <= index) and (index < middle[1]):
return middle
elif middle[0] > index:
end = middle_index - 1
else:
start = middle_index + 1
return None
@property
def middle_begin(self):
"""
Return the index where MIDDLE starts.
:rtype: int
"""
return self.__middle_begin
@middle_begin.setter
def middle_begin(self, index):
"""
Set the index where MIDDLE starts.
:param int index: the new index for MIDDLE begin
"""
if (index < 0) or (index > self.all_length):
raise ValueError(u"The given index is not valid")
self.__middle_begin = index
@property
def middle_begin_seconds(self):
"""
Return the time instant, in seconds, where MIDDLE starts.
:rtype: :class:`~aeneas.exacttiming.TimeValue`
"""
return TimeValue(self.__middle_begin) * self.rconf.mws
@property
def middle_end(self):
"""
Return the index (+1) where MIDDLE ends.
:rtype: int
"""
return self.__middle_end
@middle_end.setter
def middle_end(self, index):
"""
Set the index (+1) where MIDDLE ends.
:param int index: the new index for MIDDLE end
"""
if (index < 0) or (index > self.all_length):
raise ValueError(u"The given index is not valid")
self.__middle_end = index
@property
def middle_end_seconds(self):
"""
Return the time instant, in seconds, where MIDDLE ends.
:rtype: :class:`~aeneas.exacttiming.TimeValue`
"""
return TimeValue(self.__middle_end) * self.rconf.mws
def _ensure_mfcc_mask(self):
"""
Ensure that ``run_vad()`` has already been called,
and hence ``self.__mfcc_mask`` has a meaningful value.
"""
if self.__mfcc_mask is None:
self.log(u"VAD was not run: running it now")
self.run_vad()
def _compute_mfcc_c_extension(self):
"""
Compute MFCCs using the Python C extension cmfcc.
"""
self.log(u"Computing MFCCs using C extension...")
try:
self.log(u"Importing cmfcc...")
import aeneas.cmfcc.cmfcc
self.log(u"Importing cmfcc... done")
self.__mfcc = (aeneas.cmfcc.cmfcc.compute_from_data(
self.audio_file.audio_samples,
self.audio_file.audio_sample_rate,
self.rconf[RuntimeConfiguration.MFCC_FILTERS],
self.rconf[RuntimeConfiguration.MFCC_SIZE],
self.rconf[RuntimeConfiguration.MFCC_FFT_ORDER],
self.rconf[RuntimeConfiguration.MFCC_LOWER_FREQUENCY],
self.rconf[RuntimeConfiguration.MFCC_UPPER_FREQUENCY],
self.rconf[RuntimeConfiguration.MFCC_EMPHASIS_FACTOR],
self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH],
self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT]
)[0]).transpose()
self.log(u"Computing MFCCs using C extension... done")
return (True, None)
except Exception as exc:
self.log_exc(u"An unexpected error occurred while running cmfcc", exc, False, None)
return (False, None)
def _compute_mfcc_pure_python(self):
"""
Compute MFCCs using the pure Python code.
"""
self.log(u"Computing MFCCs using pure Python code...")
try:
self.__mfcc = MFCC(
rconf=self.rconf,
logger=self.logger
).compute_from_data(
self.audio_file.audio_samples,
self.audio_file.audio_sample_rate
).transpose()
self.log(u"Computing MFCCs using pure Python code... done")
return (True, None)
except Exception as exc:
self.log_exc(u"An unexpected error occurred while running pure Python code", exc, False, None)
return (False, None)
def reverse(self):
"""
Reverse the audio file.
The reversing is done efficiently using NumPy views inplace
instead of swapping values.
Only speech and nonspeech intervals are actually recomputed
as Python lists.
"""
self.log(u"Reversing...")
all_length = self.all_length
self.__mfcc = self.__mfcc[:, ::-1]
tmp = self.__middle_end
self.__middle_end = all_length - self.__middle_begin
self.__middle_begin = all_length - tmp
if self.__mfcc_mask is not None:
self.__mfcc_mask = self.__mfcc_mask[::-1]
# equivalent to
# self.__mfcc_mask_map = ((all_length - 1) - self.__mfcc_mask_map)[::-1]
# but done in place using NumPy view
self.__mfcc_mask_map *= -1
self.__mfcc_mask_map += all_length - 1
self.__mfcc_mask_map = self.__mfcc_mask_map[::-1]
self.__speech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__speech_intervals[::-1]]
self.__nonspeech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__nonspeech_intervals[::-1]]
self.is_reversed = not self.is_reversed
self.log(u"Reversing...done")
def run_vad(
self,
log_energy_threshold=None,
min_nonspeech_length=None,
extend_before=None,
extend_after=None
):
"""
Determine which frames contain speech and nonspeech,
and store the resulting boolean mask internally.
The four parameters might be ``None``:
in this case, the corresponding RuntimeConfiguration values
are applied.
:param float log_energy_threshold: the minimum log energy threshold to consider a frame as speech
:param int min_nonspeech_length: the minimum length, in frames, of a nonspeech interval
:param int extend_before: extend each speech interval by this number of frames to the left (before)
:param int extend_after: extend each speech interval by this number of frames to the right (after)
"""
def _compute_runs(array):
"""
Compute runs as a list of arrays,
each containing the indices of a contiguous run.
:param array: the data array
:type array: :class:`numpy.ndarray` (1D)
:rtype: list of :class:`numpy.ndarray` (1D)
"""
if len(array) < 1:
return []
return numpy.split(array, numpy.where(numpy.diff(array) != 1)[0] + 1)
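        # Note (illustrative): numpy.diff(array) != 1 marks the positions
        # where consecutive indices stop being contiguous, so e.g.
        # [1, 2, 3, 7, 8] splits into the runs [1, 2, 3] and [7, 8].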
self.log(u"Creating VAD object")
vad = VAD(rconf=self.rconf, logger=self.logger)
self.log(u"Running VAD...")
self.__mfcc_mask = vad.run_vad(
wave_energy=self.__mfcc[0],
log_energy_threshold=log_energy_threshold,
min_nonspeech_length=min_nonspeech_length,
extend_before=extend_before,
extend_after=extend_after
)
self.__mfcc_mask_map = (numpy.where(self.__mfcc_mask))[0]
self.log(u"Running VAD... done")
self.log(u"Storing speech and nonspeech intervals...")
# where( == True) already computed, reusing
# COMMENTED runs = _compute_runs((numpy.where(self.__mfcc_mask))[0])
runs = _compute_runs(self.__mfcc_mask_map)
self.__speech_intervals = [(r[0], r[-1]) for r in runs]
# where( == False) not already computed, computing now
runs = _compute_runs((numpy.where(~self.__mfcc_mask))[0])
self.__nonspeech_intervals = [(r[0], r[-1]) for r in runs]
self.log(u"Storing speech and nonspeech intervals... done")
def set_head_middle_tail(self, head_length=None, middle_length=None, tail_length=None):
"""
Set the HEAD, MIDDLE, TAIL explicitly.
If a parameter is ``None``, it will be ignored.
If both ``middle_length`` and ``tail_length`` are specified,
only ``middle_length`` will be applied.
:param head_length: the length of HEAD, in seconds
:type head_length: :class:`~aeneas.exacttiming.TimeValue`
:param middle_length: the length of MIDDLE, in seconds
:type middle_length: :class:`~aeneas.exacttiming.TimeValue`
:param tail_length: the length of TAIL, in seconds
:type tail_length: :class:`~aeneas.exacttiming.TimeValue`
:raises: TypeError: if one of the arguments is not ``None``
or :class:`~aeneas.exacttiming.TimeValue`
:raises: ValueError: if one of the arguments is greater
than the length of the audio file
"""
for variable, name in [
(head_length, "head_length"),
(middle_length, "middle_length"),
(tail_length, "tail_length")
]:
if (variable is not None) and (not isinstance(variable, TimeValue)):
raise TypeError(u"%s is not None or TimeValue" % name)
if (variable is not None) and (variable > self.audio_length):
raise ValueError(u"%s is greater than the length of the audio file" % name)
self.log(u"Setting head middle tail...")
mws = self.rconf.mws
self.log([u"Before: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
if head_length is not None:
self.middle_begin = int(head_length / mws)
if middle_length is not None:
self.middle_end = self.middle_begin + int(middle_length / mws)
elif tail_length is not None:
self.middle_end = self.all_length - int(tail_length / mws)
self.log([u"After: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
self.log(u"Setting head middle tail... done")
| agpl-3.0 | 3,151,908,285,531,111,400 | 34.399408 | 123 | 0.586335 | false |
michael-ball/sublime-text | sublime-text-3/Packages/Python PEP8 Autoformat/libs/py33/lib2to3/fixes/fix_print.py | 164 | 2854 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name("print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name("print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = ""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, "sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, "end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, "file", file)
n_stmt = Call(Name("print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = ""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, "="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = " "
l_nodes.append(n_argument)
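# Illustrative examples (derived from the module docstring above) of the
# rewrites this fixer performs:
#
#     print                    ->  print()
#     print "a", "b",          ->  print("a", "b", end=" ")
#     print >>sys.stderr, "x"  ->  print("x", file=sys.stderr)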
| unlicense | -674,602,593,625,917,000 | 31.804598 | 78 | 0.519972 | false |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/tests/test_geos.py | 89 | 45668 | from __future__ import unicode_literals
import ctypes
import json
import random
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis import memoryview
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.encoding import force_bytes
from django.utils import six
from django.utils.six.moves import xrange
from django.utils import unittest
from django.utils.unittest import skipUnless
from .. import HAS_GEOS
if HAS_GEOS:
from .. import (GEOSException, GEOSIndexError, GEOSGeometry,
GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing,
LineString, MultiLineString, fromfile, fromstr, geos_version_info,
GEOS_PREPARE)
from ..base import gdal, numpy, GEOSBase
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
        # Anything that is neither None nor the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz and geos_version_info()['version'] >= '3.3.0':
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use an
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into 3D coordinate if there is none. Also, GEOS has a
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
_ = fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# An error shouldn't be raised when testing equivalence with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertTrue(p1 > p2)
self.assertFalse(p1 < p2)
self.assertFalse(p2 > p1)
self.assertTrue(p2 < p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertFalse(p4 < p3)
self.assertTrue(p3 < p4)
self.assertTrue(p4 > p3)
self.assertFalse(p3 > p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
prev = fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now ensuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
""" Test with a srid unknown from GDAL """
pnt = Point(111200, 220900, srid=999999)
self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference('+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 +datum=WGS84 +units=m +no_defs'),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be the circumference of the Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
coll.extend([p.wkt for p in self.geometries.polygons])
coll.extend([mp.wkt for mp in self.geometries.multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
if GEOS_PREPARE:
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
@skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
if GEOS_PREPARE:
self.assertEqual(p3d.z, 100)
else:
self.assertIsNone(p3d.z)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertTrue(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
from django.utils.six.moves import cPickle
import pickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
@skipUnless(HAS_GEOS and GEOS_PREPARE, "geos >= 3.1.0 is required")
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
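# A prepared geometry precomputes spatial indexes on the original geometry,
# which makes repeated contains/intersects/covers tests against it much faster.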
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
@skipUnless(HAS_GEOS and GEOS_PREPARE, "geos >= 3.1.0 is required")
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
@skipUnless(HAS_GEOS and geos_version_info()['version'] >= '3.2.0', "geos >= 3.2.0 is required")
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0/3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0/3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
| apache-2.0 | -3,789,537,276,482,337,000 | 40.216606 | 129 | 0.579684 | false |
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/test/test_errno.py | 91 | 1184 | #! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
import errno
from test import test_support
import unittest
std_c_errors = frozenset(['EDOM', 'ERANGE'])
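# EDOM and ERANGE are the only errno macros that ANSI C itself requires.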
class ErrnoAttributeTests(unittest.TestCase):
def test_for_improper_attributes(self):
# No unexpected attributes should be on the module.
for error_code in std_c_errors:
self.assertTrue(hasattr(errno, error_code),
"errno is missing %s" % error_code)
def test_using_errorcode(self):
# Every key value in errno.errorcode should be on the module.
for value in errno.errorcode.itervalues():
self.assertTrue(hasattr(errno, value), 'no %s attr in errno' % value)
class ErrorcodeTests(unittest.TestCase):
def test_attributes_in_errorcode(self):
for attribute in errno.__dict__.iterkeys():
if attribute.isupper():
self.assertIn(getattr(errno, attribute), errno.errorcode,
'no %s attr in errno.errorcode' % attribute)
def test_main():
test_support.run_unittest(ErrnoAttributeTests, ErrorcodeTests)
if __name__ == '__main__':
test_main()
| gpl-2.0 | 9,007,566,669,092,644,000 | 28.6 | 81 | 0.637669 | false |
wxgeo/geophar | wxgeometrie/modules/tablatex/tests/test_tabsign.py | 1 | 12983 | # -*- coding: utf-8 -*-
from wxgeometrie.modules.tablatex.tests.tabtestlib import assert_tableau
from wxgeometrie.modules.tablatex.tabsign import tabsign
from pytest import xfail
def assert_tabsign(chaine, code_latex, **options):
assert_tableau(tabsign, chaine, code_latex, **options)
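# Each test below feeds tabsign() a sign-table specification -- either a manual
# "column: values" description or an expression such as "f(x)=..." -- and
# compares the LaTeX it generates against the expected tabular environment.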
def test_mode_manuel():
s = "x: -oo;+oo// 2x+1: -- -1/2 ++// 3-x: ++ 3 --// f(x)"
tab = \
r"""\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-\frac{1}{2}$ & & $3$ & & $+\infty$ \\
\hline
$2x+1$ & & $-$ & 0 & + & & + & \\
\hline
$3-x$ & & + & & + & 0 & $-$ & \\
\hline
$f(x)$ & & $-$ & 0 & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// 2x+1: -- -1/2 ++// 3-x: ++ 3 --// f(x)
"""
assert_tabsign(s, tab)
def test_mode_auto():
s = 'g(x)=(x-7/2)(x+7/2)'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-\frac{7}{2}$ & & $\frac{7}{2}$ & & $+\infty$ \\
\hline
$x-\frac{7}{2}$ & & $-$ & & $-$ & 0 & + & \\
\hline
$x+\frac{7}{2}$ & & $-$ & 0 & + & & + & \\
\hline
$g(x)$ & & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// x-7/2: -- 7/2 ++ // x+7/2: -- -7/2 ++ // g(x)
% g(x)=(x-7/2)(x+7/2)
'''
assert_tabsign(s, tab)
def test_polynomes():
s= 'f(x)=x^3-30x^2+112'
tab = \
r"""\begin{center}
\begin{tabular}{|c|ccccccccc|}
\hline
$x$ & $-\infty$ & & $-6 \sqrt{7}+14$ & & $2$ & & $14+6 \sqrt{7}$ & & $+\infty$ \\
\hline
$f(x)$ & & $-$ & 0 & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// x^3-30 x^2+112: -- -6*sqrt(7) + 14 ++ 2 -- 14 + 6*sqrt(7) ++ // f(x)
% f(x)=x^3-30x^2+112
"""
assert_tabsign(s, tab)
s = '- 6 x^{2} - 12 x + 4'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-\frac{\sqrt{15}}{3}-1$ & & $-1+\frac{\sqrt{15}}{3}$ & & $+\infty$ \\
\hline
$-6 x^{2}-12 x+4$ & & $-$ & 0 & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// -6 x^(2)-12 x+4: -- -sqrt(15)/3 - 1 ++ -1 + sqrt(15)/3 -- // - 6 x^{2} - 12 x + 4
% - 6 x^{2} - 12 x + 4
'''
assert_tabsign(s, tab)
def test_quotients():
s = '(3x-2)/((x-1)^2)'
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\
\hline
$3 x-2$ & & $-$ & 0 & + & & + & \\
\hline
$(x-1)^{2}$ & & + & & + & 0 & + & \\
\hline
$\frac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & \geopharDB{$\frac{3x-2}{(x-1)^{2}}$} & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // (3x-2)/((x-1)^2)
% (3x-2)/((x-1)^2)
'''
assert_tabsign(s, tab)
s = '(3x-2)/(x-1)^2'
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\
\hline
$3 x-2$ & & $-$ & 0 & + & & + & \\
\hline
$(x-1)^{2}$ & & + & & + & 0 & + & \\
\hline
$\frac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & \geopharDB{$\frac{3x-2}{(x-1)^{2}}$} & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // (3x-2)/(x-1)^2
% (3x-2)/(x-1)^2
'''
assert_tabsign(s, tab)
def test_latex():
s = '\dfrac{3x-2}{(x-1)^2}'
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\
\hline
$3 x-2$ & & $-$ & 0 & + & & + & \\
\hline
$(x-1)^{2}$ & & + & & + & 0 & + & \\
\hline
$\dfrac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & \geopharDB{$\dfrac{3x-2}{(x-1)^{2}}$} & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // \dfrac{3x-2}{(x-1)^2}
% \dfrac{3x-2}{(x-1)^2}
'''
assert_tabsign(s, tab)
s = "g(x)=\dfrac{-x+1}{\e^{x}}"
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccc|}
\hline
$x$ & $-\infty$ & & $1$ & & $+\infty$ \\
\hline
$-x+1$ & & + & 0 & $-$ & \\
\hline
$\e^{x}$ & & + & & + & \\
\hline
$g(x)$ & & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// -x+1: ++ 1 -- // e^(x): ++ // g(x)
% g(x)=\dfrac{-x+1}{\e^{x}}
'''
assert_tabsign(s, tab)
s= "f'(x)=1-\e^{-x+2}"
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccc|}
\hline
$x$ & $-\infty$ & & $2$ & & $+\infty$ \\
\hline
$f'(x)$ & & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// 1-e^(-x+2): -- 2 ++ // f'(x)
% f'(x)=1-\e^{-x+2}
'''
assert_tabsign(s, tab)
def test_intervalle():
s = "x^2 sur [1;+oo["
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccc|}
\hline
$x$ & $1$ & & $+\infty$ \\
\hline
$x^{2}$ & & + & \\
\hline
\end{tabular}
\end{center}
% x: 1;+oo// x^2: ++ // x^2
% x^2 sur [1;+\infty[
'''
assert_tabsign(s, tab)
s = "u(x)=1-x sur ]0;+oo["
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccc|}
\hline
$x$ & $0$ & & $1$ & & $+\infty$ \\
\hline
$u(x)$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: !0;+oo// 1-x: ++ 1 -- // u(x)
% u(x)=1-x sur ]0;+\infty[
'''
assert_tabsign(s, tab)
s = "u(x)=x(1-x) sur ]-1;0[U]0;4["
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-1$ & & $0$ & & $1$ & & $4$ \\
\hline
$x$ & & $-$ & 0 & + & & + & \\
\hline
$1-x$ & & + & & + & 0 & $-$ & \\
\hline
$u(x)$ & \geopharDB{$u(x)$} & $-$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} \\
\hline
\end{tabular}
\end{center}
% x: !-1;!0: !0;!4// !x: -- 0 ++ // 1-x: ++ 1 -- // u(x)
% u(x)=x(1-x) sur ]-1;0[U]0;4[
'''
assert_tabsign(s, tab)
s = "u(x)=(1+x)(1-x)/x sur ]-3;2[U]2;4]"
tab = \
r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$}
\begin{center}
\begin{tabular}{|c|ccccccccccc|}
\hline
$x$ & $-3$ & & $-1$ & & $0$ & & $1$ & & $2$ & & $4$ \\
\hline
$1+x$ & & $-$ & 0 & + & & + & & + & & + & \\
\hline
$1-x$ & & + & & + & & + & 0 & $-$ & & $-$ & \\
\hline
$x$ & & $-$ & & $-$ & 0 & + & & + & & + & \\
\hline
$u(x)$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: !-3;!0: !0;!2: !2;4// 1+x: -- -1 ++ // 1-x: ++ 1 -- // !x: -- 0 ++ // u(x)
% u(x)=(1+x)(1-x)/x sur ]-3;2[U]2;4]
'''
assert_tabsign(s, tab)
def test_issue_173():
s = "(1 - x)\e^{ 2x}"
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccc|}
\hline
$x$ & $-\infty$ & & $1$ & & $+\infty$ \\
\hline
$1-x$ & & + & 0 & $-$ & \\
\hline
$\e^{2 x}$ & & + & & + & \\
\hline
$(1-x)\e^{ 2x}$ & & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// 1-x: ++ 1 -- // e^(2 x): ++ // (1 - x)\e^{ 2x}
% (1 - x)\e^{ 2x}
'''
assert_tabsign(s, tab)
def test_issue_200():
s = 'f(x)=x^2-3'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-\sqrt{3}$ & & $\sqrt{3}$ & & $+\infty$ \\
\hline
$f(x)$ & & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// x^2-3: ++ -sqrt(3) -- sqrt(3) ++ // f(x)
% f(x)=x^2-3
'''
assert_tabsign(s, tab)
def test_issue_189():
# Sign and variation tables with decimal numbers
s = '2-0.25x'
options = {'cellspace': True}
tab = \
r'''\begin{center}
\begin{tabular}{|Sc|ScScScScSc|}
\hline
$x$ & $-\infty$ & & $8$ & & $+\infty$ \\
\hline
$2-0.25x$ & & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// 2-0.25 x: ++ 8 -- // 2-0.25x
% 2-0.25x
'''
assert_tabsign(s, tab, **options)
def test_intervalle_virgule():
s = 'h(x)=x^2-x/2-3 sur [-2,5;3,5]'
options = {'cellspace': True}
tab = \
r'''\begin{center}
\begin{tabular}{|Sc|ScScScScScScSc|}
\hline
$x$ & $-2,5$ & & $-\frac{3}{2}$ & & $2$ & & $3,5$ \\
\hline
$h(x)$ & & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -2,5;3,5// x^2-x/2-3: ++ -3/2 -- 2 ++ // h(x)
% h(x)=x^2-x/2-3 sur [-2,5;3,5]
'''
assert_tabsign(s, tab, **options)
def test_constante():
s = 'f(x)=5'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccc|}
\hline
$x$ & $-\infty$ & & $+\infty$ \\
\hline
$f(x)$ & & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// 5: ++ // f(x)
% f(x)=5
'''
assert_tabsign(s, tab)
def test_issue_247():
"FS#247 - Accepter la syntaxe suivant : 'f(x): -- -8 ++ -2 -- 5 ++'."
s = "f(x): -- -8 ++ -2 -- 5 ++"
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccccc|}
\hline
$x$ & $-\infty$ & & $-8$ & & $-2$ & & $5$ & & $+\infty$ \\
\hline
$f(x)$ & & $-$ & 0 & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% f(x): -- -8 ++ -2 -- 5 ++
'''
assert_tabsign(s, tab)
def test_mix_numeric_and_symbolic_values():
s = 'f(x): -- x_1 ++ 5 ++ x_2 -- 7 --'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccccccc|}
\hline
$x$ & $-\infty$ & & $x_1$ & & $5$ & & $x_2$ & & $7$ & & $+\infty$ \\
\hline
$f(x)$ & & $-$ & 0 & + & 0 & + & 0 & $-$ & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% f(x): -- x_1 ++ 5 ++ x_2 -- 7 --
'''
assert_tabsign(s, tab)
s = r'x:-oo;+oo // f(x): -- 5 ++ // g(x): ++ \alpha=2,1 --'
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $\alpha$ & & $5$ & & $+\infty$ \\
\hline
$f(x)$ & & $-$ & & $-$ & 0 & + & \\
\hline
$g(x)$ & & + & 0 & $-$ & & $-$ & \\
\hline
$f(x)g(x)$ & & $-$ & 0 & + & 0 & $-$ & \\
\hline
\end{tabular}
\end{center}
% x:-oo;+oo // f(x): -- 5 ++ // g(x): ++ \alpha=2,1 --
'''
assert_tabsign(s, tab)
def test_approche():
s = "f(x)=x^2-3x-5"
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-\frac{\sqrt{29}}{2}+\frac{3}{2}$ & & $\frac{3}{2}+\frac{\sqrt{29}}{2}$ & & $+\infty$ \\
\hline
$f(x)$ & & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// x^2-3 x-5: ++ -sqrt(29)/2 + 3/2 -- 3/2 + sqrt(29)/2 ++ // f(x)
% f(x)=x^2-3x-5
'''
assert_tabsign(s, tab)
options = {'approche': True, "decimales": 2}
tab = \
r'''\begin{center}
\begin{tabular}{|c|ccccccc|}
\hline
$x$ & $-\infty$ & & $-1,19$ & & $4,19$ & & $+\infty$ \\
\hline
$f(x)$ & & + & 0 & $-$ & 0 & + & \\
\hline
\end{tabular}
\end{center}
% x: -oo;+oo// x^2-3 x-5: ++ -1,19 -- 4,19 ++ // f(x)
% f(x)=x^2-3x-5
'''
assert_tabsign(s, tab, **options)
| gpl-2.0 | 7,098,556,127,674,237,000 | 27.469298 | 127 | 0.346557 | false |
mihail911/nupic | examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py | 40 | 15840 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os

from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
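# (For example, with an aggregation period of one hour and a predictAheadTime
# of four hours, predictionSteps would come out to 4 and clParams['steps']
# would be set to '4'.)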
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 | -8,916,489,403,285,606,000 | 36.894737 | 92 | 0.623864 | false |
kevmccor/openemr | phpmyadmin/doc/_ext/configext.py | 141 | 6618 | from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.domains.std import GenericObject, StandardDomain
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util import ws_re
from sphinx import addnodes
from sphinx.util.docfields import Field
from docutils import nodes
def get_id_from_cfg(text):
'''
Formats anchor ID from config option.
'''
if text[:6] == '$cfg[\'':
text = text[6:]
if text[-2:] == '\']':
text = text[:-2]
text = text.replace('[$i]', '')
parts = text.split("']['")
return parts
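# For example (with a typical phpMyAdmin option name):
# get_id_from_cfg("$cfg['Servers'][$i]['host']") -> ['Servers', 'host']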
class ConfigOption(ObjectDescription):
indextemplate = 'configuration option; %s'
parse_node = None
has_arguments = True
doc_field_types = [
Field('default', label='Default value', has_arg=False,
names=('default', )),
Field('type', label='Type', has_arg=False,
names=('type',)),
]
def handle_signature(self, sig, signode):
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub('', sig)
return name
def add_target_and_index(self, name, sig, signode):
targetparts = get_id_from_cfg(name)
targetname = 'cfg_%s' % '_'.join(targetparts)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
indextype = 'single'
# Generic index entries
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, targetname))
self.indexnode['entries'].append((indextype, name,
targetname, targetname))
# Server section
if targetparts[0] == 'Servers' and len(targetparts) > 1:
indexname = ', '.join(targetparts[1:])
self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname,
targetname, targetname))
self.indexnode['entries'].append((indextype, indexname,
targetname, targetname))
else:
indexname = ', '.join(targetparts)
self.indexnode['entries'].append((indextype, indexname,
targetname, targetname))
self.env.domaindata['config']['objects'][self.objtype, name] = \
self.env.docname, targetname
class ConfigSectionXRefRole(XRefRole):
"""
Cross-referencing role for configuration sections (adds an index entry).
"""
def result_nodes(self, document, env, node, is_ref):
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, varname),
('single', 'configuration section; %s' % varname, tgtid, varname)
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
class ConfigSection(ObjectDescription):
indextemplate = 'configuration section; %s'
parse_node = None
def handle_signature(self, sig, signode):
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like XRefRole does
name = ws_re.sub('', sig)
return name
def add_target_and_index(self, name, sig, signode):
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
if self.indextemplate:
colon = self.indextemplate.find(':')
if colon != -1:
indextype = self.indextemplate[:colon].strip()
indexentry = self.indextemplate[colon+1:].strip() % (name,)
else:
indextype = 'single'
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, targetname))
self.env.domaindata['config']['objects'][self.objtype, name] = \
self.env.docname, targetname
class ConfigOptionXRefRole(XRefRole):
"""
Cross-referencing role for configuration options (adds an index entry).
"""
def result_nodes(self, document, env, node, is_ref):
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, varname),
('single', 'configuration option; %s' % varname, tgtid, varname)
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
class ConfigFileDomain(Domain):
name = 'config'
label = 'Config'
object_types = {
'option': ObjType('config option', 'option'),
'section': ObjType('config section', 'section'),
}
directives = {
'option': ConfigOption,
'section': ConfigSection,
}
roles = {
'option': ConfigOptionXRefRole(),
'section': ConfigSectionXRefRole(),
}
initial_data = {
'objects': {}, # (type, name) -> docname, labelid
}
def clear_doc(self, docname):
        # Iterate over a copy of the items, since entries are deleted from
        # the dict during iteration (required on Python 3, harmless on 2).
        for key, (fn, _) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][key]
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
docname, labelid = self.data['objects'].get((typ, target), ('', ''))
if not docname:
return None
else:
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def get_objects(self):
for (type, name), info in self.data['objects'].items():
yield (name, name, type, info[0], info[1],
self.object_types[type].attrs['searchprio'])
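# Illustrative usage from a .rst source file (an assumed example, not taken
# verbatim from phpMyAdmin's documentation) of the directive and role
# registered below:
#
#   .. config:option:: $cfg['Servers'][$i]['host']
#
#       The hostname or IP address of your database server.
#
#   ... referenced elsewhere as :config:option:`$cfg['Servers'][$i]['host']`.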
def setup(app):
app.add_domain(ConfigFileDomain)
| gpl-3.0 | 8,570,576,961,580,174,000 | 34.202128 | 96 | 0.562406 | false |
xuleiboy1234/autoTitle | tensorflow/tensorflow/tools/ci_build/update_version.py | 4 | 12685 | #!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically update TensorFlow version in source files
#
# Usage:
# ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc0
# ./tensorflow/tools/ci_build/update_version.py --nightly
#
"""Update version of TensorFlow script."""
# pylint: disable=superfluous-parens
import argparse
import fileinput
import os
import re
import subprocess
import time
# File parameters
TF_SRC_DIR = "tensorflow"
VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel" % TF_SRC_DIR
GPU_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-gpu" % TF_SRC_DIR
RELEVANT_FILES = [TF_SRC_DIR,
VERSION_H,
SETUP_PY,
README_MD,
DEVEL_DOCKERFILE,
GPU_DEVEL_DOCKERFILE]
# Version type parameters
NIGHTLY_VERSION = 1
REGULAR_VERSION = 0
def replace_line(old_line, new_line, filename):
"""Replace a line in a file."""
for line in fileinput.input(filename, inplace=True):
print(line.rstrip().replace(old_line, new_line))
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found. Are you under the TensorFlow source root"
" directory?")
def check_all_files():
"""Check all relevant files necessary for upgrade."""
for file_name in RELEVANT_FILES:
check_existence(file_name)
def replace_with_sed(query, filename):
"""Replace with sed when regex is required."""
subprocess.check_call("sed -i -r -e \"%s\" \"%s\"" % (query, filename),
shell=True)
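# For example (illustrative values), replace_with_sed("s/foo/bar/g", "README.md")
# shells out to: sed -i -r -e "s/foo/bar/g" "README.md"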
class Version(object):
"""Version class object that stores SemVer version information."""
def __init__(self, major, minor, patch, identifier_string, version_type):
"""Constructor.
Args:
major: major string eg. (1)
minor: minor string eg. (3)
patch: patch string eg. (1)
identifier_string: extension string eg. (-rc0)
version_type: version parameter ((REGULAR|NIGHTLY)_VERSION)
"""
self.string = "%s.%s.%s%s" % (major,
minor,
patch,
identifier_string)
self.major = major
self.minor = minor
self.patch = patch
self.identifier_string = identifier_string
self.version_type = version_type
def __str__(self):
return self.string
@property
def pep_440_str(self):
if self.version_type == REGULAR_VERSION:
return_string = "%s.%s.%s%s" % (self.major,
self.minor,
self.patch,
self.identifier_string)
return return_string.replace("-", "")
else:
return_string = "%s.%s.%s" % (self.major,
self.minor,
self.identifier_string)
return return_string.replace("-", "")
@staticmethod
def parse_from_string(string, version_type):
"""Returns version object from Semver string.
Args:
string: version string
version_type: version parameter
Raises:
RuntimeError: If the version string is not valid.
"""
# Check validity of new version string
if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string):
raise RuntimeError("Invalid version string: %s" % string)
major, minor, extension = string.split(".", 2)
# Isolate patch and identifier string if identifier string exists
extension_split = extension.split("-", 1)
patch = extension_split[0]
if len(extension_split) == 2:
identifier_string = "-" + extension_split[1]
else:
identifier_string = ""
return Version(major,
minor,
patch,
identifier_string,
version_type)
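# Illustrative behaviour of the class above (derived from its parsing and
# formatting rules):
#   Version.parse_from_string("1.4.0-rc0", REGULAR_VERSION).string      -> "1.4.0-rc0"
#   Version.parse_from_string("1.4.0-rc0", REGULAR_VERSION).pep_440_str -> "1.4.0rc0"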
def get_current_semver_version():
"""Returns a Version object of current version.
Returns:
version: Version object of current SemVer string based on information from
core/public/version.h
"""
# Get current version information
version_file = open(VERSION_H, "r")
for line in version_file:
major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line)
minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line)
patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line)
extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line)
if major_match:
old_major = major_match.group(1)
if minor_match:
old_minor = minor_match.group(1)
if patch_match:
old_patch_num = patch_match.group(1)
if extension_match:
old_extension = extension_match.group(1)
break
if "dev" in old_extension:
version_type = NIGHTLY_VERSION
else:
version_type = REGULAR_VERSION
return Version(old_major,
old_minor,
old_patch_num,
old_extension,
version_type)
def update_version_h(old_version, new_version):
"""Update tensorflow/core/public/version.h."""
replace_line("#define TF_MAJOR_VERSION %s" % old_version.major,
"#define TF_MAJOR_VERSION %s" % new_version.major, VERSION_H)
replace_line("#define TF_MINOR_VERSION %s" % old_version.minor,
"#define TF_MINOR_VERSION %s" % new_version.minor, VERSION_H)
replace_line("#define TF_PATCH_VERSION %s" % old_version.patch,
"#define TF_PATCH_VERSION %s" % new_version.patch, VERSION_H)
replace_line("#define TF_VERSION_SUFFIX \"%s\"" %
old_version.identifier_string,
"#define TF_VERSION_SUFFIX \"%s\""
% new_version.identifier_string,
VERSION_H)
def update_setup_dot_py(old_version, new_version):
"""Update setup.py."""
replace_line("_VERSION = '%s'" % old_version.string,
"_VERSION = '%s'" % new_version.string, SETUP_PY)
def update_readme(old_version, new_version):
"""Update README."""
pep_440_str = new_version.pep_440_str
replace_with_sed(r"s/%s\.%s\.([[:alnum:]]+)-/%s-/g" % (old_version.major,
old_version.minor,
pep_440_str),
README_MD)
def update_md_files(old_version, new_version):
"""Update the md doc files.
Args:
old_version: Version object of current version
new_version: Version object of new version
"""
old_pep_version = old_version.pep_440_str
new_pep_version = new_version.pep_440_str
for filename in ["linux", "mac", "windows", "sources"]:
filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR,
filename)
replace_with_sed("s/tensorflow-%s/tensorflow-%s/g"
% (old_pep_version, new_pep_version), filepath)
replace_with_sed("s/tensorflow_gpu-%s/tensorflow_gpu-%s/g"
% (old_pep_version, new_pep_version), filepath)
replace_with_sed("s/TensorFlow %s/TensorFlow %s/g"
% (old_pep_version, new_pep_version), filepath)
for filename in ["java", "go", "c"]:
filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR,
filename)
replace_with_sed(r"s/x86_64-%s/x86_64-%s/g"
% (old_version, new_version), filepath)
replace_with_sed(r"s/libtensorflow-%s.jar/libtensorflow-%s.jar/g"
% (old_version, new_version), filepath)
replace_with_sed(r"s/<version>%s<\/version>/<version>%s<\/version>/g"
% (old_version, new_version), filepath)
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
if major_mismatch or minor_mismatch:
return True
return False
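# e.g. (illustrative): 1.3.1 -> 1.4.0 is a major/minor change, so files that
# reference the r1.3 branch must be repointed to r1.4; 1.4.0 -> 1.4.1 is not.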
def update_dockerfiles(old_version, new_version):
"""Update dockerfiles if there was a major change."""
if major_minor_change(old_version, new_version):
old_r_major_minor = r"r%s\.%s" % (old_version.major, old_version.minor)
old_r_major_minor_string = old_r_major_minor.replace("\\", "")
r_major_minor = r"r%s\.%s" % (new_version.major, new_version.minor)
r_major_minor_string = r_major_minor.replace("\\", "")
print("Detected Major.Minor change.")
print("Updating pattern %s to %s in additional files"
% (old_r_major_minor_string, r_major_minor_string))
# Update dockerfiles
replace_with_sed("s/%s/%s/g"
% (old_r_major_minor, r_major_minor), DEVEL_DOCKERFILE)
replace_with_sed("s/%s/%s/g"
% (old_r_major_minor, r_major_minor), GPU_DEVEL_DOCKERFILE)
def check_for_lingering_string(lingering_string):
"""Check for given lingering strings."""
formatted_string = lingering_string.replace(".", r"\.")
try:
linger_strs = subprocess.check_output("grep -rnoH \"%s\" \"%s\""
% (formatted_string, TF_SRC_DIR),
shell=True).split("\n")
except subprocess.CalledProcessError:
linger_strs = []
if linger_strs:
print("WARNING: Below are potentially instances of lingering old version "
"string \"%s\" in source directory \"%s/\" that are not "
"updated by this script. Please check them manually!"
% (lingering_string, TF_SRC_DIR))
for linger_str in linger_strs:
print(linger_str)
else:
print("No lingering old version strings \"%s\" found in source directory"
" \"%s/\". Good." % (lingering_string, TF_SRC_DIR))
def check_for_old_version(old_version, new_version):
"""Check for old version references."""
for old_ver in [old_version.string, old_version.pep_440_str]:
check_for_lingering_string(old_ver)
if major_minor_change(old_version, new_version):
old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor)
check_for_lingering_string(old_r_major_minor)
def main():
"""This script updates all instances of version in the tensorflow directory.
Requirements:
version: The version tag
OR
nightly: Create a nightly tag with current date
Raises:
RuntimeError: If the script is not being run from tf source dir
"""
  parser = argparse.ArgumentParser(
      description="Update the TensorFlow version in source files.")
group = parser.add_mutually_exclusive_group(required=True)
# Arg information
group.add_argument("--version",
help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>",
default="")
group.add_argument("--nightly",
help="disable the service provisioning step",
action="store_true")
args = parser.parse_args()
check_all_files()
old_version = get_current_semver_version()
if args.nightly:
new_version = Version(old_version.major,
old_version.minor,
old_version.patch,
"-dev" + time.strftime("%Y%m%d"),
NIGHTLY_VERSION)
else:
new_version = Version.parse_from_string(args.version, REGULAR_VERSION)
update_version_h(old_version, new_version)
update_setup_dot_py(old_version, new_version)
update_readme(old_version, new_version)
update_md_files(old_version, new_version)
update_dockerfiles(old_version, new_version)
# Print transition details
print("Major: %s -> %s" % (old_version.major, new_version.major))
print("Minor: %s -> %s" % (old_version.minor, new_version.minor))
print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))
check_for_old_version(old_version, new_version)
if __name__ == "__main__":
main()
| mit | 2,476,023,095,633,056,000 | 34.236111 | 80 | 0.603153 | false |
vikatory/kbengine | kbe/res/scripts/common/Lib/distutils/command/bdist_msi.py | 152 | 35217 | # Copyright (C) 2005, 2006 Martin von Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
bmwidth = 152*ruler/328
#if kw.get("bitmap", True):
# self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 15, 10, 320, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
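# Illustrative sketch (assumed names, mirroring how the dialogs are built in
# add_ui below; not part of distutils itself):
#
#   dlg = PyDialog(db, "HelloDlg", 50, 50, 370, 300, 3, "[ProductName] Setup",
#                  "Next", "Next", "Cancel")
#   dlg.title("Welcome to the [ProductName] Installer")
#   dlg.back("< Back", None, active=0)
#   dlg.next("Next >", "Cancel").event("EndDialog", "Return")
#   dlg.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")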
class bdist_msi(Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
'2.5', '2.6', '2.7', '2.8', '2.9',
'3.0', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8', '3.9']
other_version = 'X'
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.skip_build = None
self.install_script = None
self.pre_install_script = None
self.versions = None
def finalize_options(self):
self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'msi')
short_version = get_python_version()
if (not self.target_version) and self.distribution.has_ext_modules():
self.target_version = short_version
if self.target_version:
self.versions = [self.target_version]
if not self.skip_build and self.distribution.has_ext_modules()\
and self.target_version != short_version:
raise DistutilsOptionError(
"target version can only be %s, or the '--skip-build'"
" option must be specified" % (short_version,))
else:
self.versions = list(self.all_versions)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.pre_install_script:
raise DistutilsOptionError(
"the pre-install-script feature is not yet implemented")
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError(
"install_script '%s' not found in scripts"
% self.install_script)
self.install_script_key = None
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
self.mkpath(self.dist_dir)
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
installer_name = os.path.abspath(installer_name)
if os.path.exists(installer_name): os.unlink(installer_name)
metadata = self.distribution.metadata
author = metadata.author
if not author:
author = metadata.maintainer
if not author:
author = "UNKNOWN"
version = metadata.get_version()
# ProductVersion must be strictly numeric
# XXX need to deal with prerelease versions
sversion = "%d.%d.%d" % StrictVersion(version).version
# Prefix ProductName with Python x.y, so that
# it sorts together with the other Python packages
# in Add-Remove-Programs (APR)
fullname = self.distribution.get_fullname()
if self.target_version:
product_name = "Python %s %s" % (self.target_version, fullname)
else:
product_name = "Python %s" % (fullname)
self.db = msilib.init_database(installer_name, schema,
product_name, msilib.gen_uuid(),
sversion, author)
msilib.add_tables(self.db, sequence)
props = [('DistVersion', version)]
email = metadata.author_email or metadata.maintainer_email
if email:
props.append(("ARPCONTACT", email))
if metadata.url:
props.append(("ARPURLINFOABOUT", metadata.url))
if props:
add_data(self.db, 'Property', props)
self.add_find_python()
self.add_files()
self.add_scripts()
self.add_ui()
self.db.Commit()
if hasattr(self.distribution, 'dist_files'):
tup = 'bdist_msi', self.target_version or 'any', fullname
self.distribution.dist_files.append(tup)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
db = self.db
cab = msilib.CAB("distfiles")
rootdir = os.path.abspath(self.bdist_dir)
root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
f = Feature(db, "Python", "Python", "Everything",
0, 1, directory="TARGETDIR")
items = [(f, root, '')]
for version in self.versions + [self.other_version]:
target = "TARGETDIR" + version
name = default = "Python" + version
desc = "Everything"
if version is self.other_version:
title = "Python from another location"
level = 2
else:
title = "Python %s from registry" % version
level = 1
f = Feature(db, name, title, desc, 1, level, directory=target)
dir = Directory(db, cab, root, rootdir, target, default)
items.append((f, dir, version))
db.Commit()
seen = {}
for feature, dir, version in items:
todo = [dir]
while todo:
dir = todo.pop()
for file in os.listdir(dir.absolute):
afile = os.path.join(dir.absolute, file)
if os.path.isdir(afile):
short = "%s|%s" % (dir.make_short(file), file)
default = file + version
newdir = Directory(db, cab, dir, file, default, short)
todo.append(newdir)
else:
if not dir.component:
dir.start_component(dir.logical, feature, 0)
if afile not in seen:
key = seen[afile] = dir.add_file(file)
if file==self.install_script:
if self.install_script_key:
raise DistutilsOptionError(
"Multiple files with name %s" % file)
self.install_script_key = '[#%s]' % key
else:
key = seen[afile]
add_data(self.db, "DuplicateFile",
[(key + version, dir.component, key, None, dir.logical)])
db.Commit()
cab.commit(db)
def add_find_python(self):
"""Adds code to the installer to compute the location of Python.
Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
registry for each version of Python.
Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
else from PYTHON.MACHINE.X.Y.
Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
start = 402
for ver in self.versions:
install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
machine_reg = "python.machine." + ver
user_reg = "python.user." + ver
machine_prop = "PYTHON.MACHINE." + ver
user_prop = "PYTHON.USER." + ver
machine_action = "PythonFromMachine" + ver
user_action = "PythonFromUser" + ver
exe_action = "PythonExe" + ver
target_dir_prop = "TARGETDIR" + ver
exe_prop = "PYTHON" + ver
if msilib.Win64:
# type: msidbLocatorTypeRawValue + msidbLocatorType64bit
Type = 2+16
else:
Type = 2
add_data(self.db, "RegLocator",
[(machine_reg, 2, install_path, None, Type),
(user_reg, 1, install_path, None, Type)])
add_data(self.db, "AppSearch",
[(machine_prop, machine_reg),
(user_prop, user_reg)])
add_data(self.db, "CustomAction",
[(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
(user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
(exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
])
add_data(self.db, "InstallExecuteSequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "InstallUISequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "Condition",
[("Python" + ver, 0, "NOT TARGETDIR" + ver)])
start += 4
assert start < 500
def add_scripts(self):
if self.install_script:
start = 6800
for ver in self.versions + [self.other_version]:
install_action = "install_script." + ver
exe_prop = "PYTHON" + ver
add_data(self.db, "CustomAction",
[(install_action, 50, exe_prop, self.install_script_key)])
add_data(self.db, "InstallExecuteSequence",
[(install_action, "&Python%s=3" % ver, start)])
start += 1
# XXX pre-install scripts are currently refused in finalize_options()
# but if this feature is completed, it will also need to add
# entries for each version as the above code does
if self.pre_install_script:
scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
f = open(scriptfn, "w")
# The batch file will be executed with [PYTHON], so that %1
# is the path to the Python interpreter; %0 will be the path
# of the batch file.
# rem ="""
# %1 %0
# exit
# """
# <actual script>
f.write('rem ="""\n%1 %0\nexit\n"""\n')
f.write(open(self.pre_install_script).read())
f.close()
add_data(self.db, "Binary",
[("PreInstall", msilib.Binary(scriptfn))
])
add_data(self.db, "CustomAction",
[("PreInstall", 2, "PreInstall", None)
])
add_data(self.db, "InstallExecuteSequence",
[("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectFeaturesDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Feature (Python directory) selection
seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Python Installations")
seldlg.text("Hint", 15, 30, 300, 20, 3,
"Select the Python locations where %s should be installed."
% self.distribution.get_fullname())
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
order = 1
c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
for version in self.versions + [self.other_version]:
order += 1
c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
"FEATURE_SELECTED AND &Python%s=3" % version,
ordering=order)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
c.event("EndDialog", "Return", ordering=order + 2)
c = seldlg.cancel("Cancel", "Features")
c.event("SpawnDialog", "CancelDlg")
c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
"FEATURE", None, "PathEdit", None)
c.event("[FEATURE_SELECTED]", "1")
ver = self.other_version
install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
c = seldlg.text("Other", 15, 200, 300, 15, 3,
"Provide an alternate Python location")
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
"TARGETDIR" + ver, None, "Next", None)
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
self.target_version)
else:
base_name = "%s.%s.msi" % (fullname, self.plat_name)
installer_name = os.path.join(self.dist_dir, base_name)
return installer_name
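    # For example (illustrative names): with plat_name "win32" and
    # target_version "2.7", a distribution "foo-1.0" yields
    # dist/foo-1.0.win32-py2.7.msi; without a target version,
    # dist/foo-1.0.win32.msi.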
| lgpl-3.0 | -2,207,692,972,586,438,700 | 46.524966 | 200 | 0.538562 | false |
suyashphadtare/sajil-final-erp | erpnext/startup/report_data_map.py | 21 | 8867 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
# mappings for table dumps
# "remember to add indexes!"
data_map = {
"Company": {
"columns": ["name"],
"conditions": ["docstatus < 2"]
},
"Fiscal Year": {
"columns": ["name", "year_start_date", "year_end_date"],
"conditions": ["docstatus < 2"],
},
# Accounts
"Account": {
"columns": ["name", "parent_account", "lft", "rgt", "report_type",
"company", "group_or_ledger"],
"conditions": ["docstatus < 2"],
"order_by": "lft",
"links": {
"company": ["Company", "name"],
}
},
"Cost Center": {
"columns": ["name", "lft", "rgt"],
"conditions": ["docstatus < 2"],
"order_by": "lft"
},
"GL Entry": {
"columns": ["name", "account", "posting_date", "cost_center", "debit", "credit",
"is_opening", "company", "voucher_type", "voucher_no", "remarks"],
"order_by": "posting_date, account",
"links": {
"account": ["Account", "name"],
"company": ["Company", "name"],
"cost_center": ["Cost Center", "name"]
}
},
# Stock
"Item": {
"columns": ["name", "if(item_name=name, '', item_name) as item_name", "description",
"item_group as parent_item_group", "stock_uom", "brand", "valuation_method",
"re_order_level", "re_order_qty"],
# "conditions": ["docstatus < 2"],
"order_by": "name",
"links": {
"parent_item_group": ["Item Group", "name"],
"brand": ["Brand", "name"]
}
},
"Item Group": {
"columns": ["name", "parent_item_group"],
# "conditions": ["docstatus < 2"],
"order_by": "lft"
},
"Brand": {
"columns": ["name"],
"conditions": ["docstatus < 2"],
"order_by": "name"
},
"Project": {
"columns": ["name"],
"conditions": ["docstatus < 2"],
"order_by": "name"
},
"Warehouse": {
"columns": ["name"],
"conditions": ["docstatus < 2"],
"order_by": "name"
},
"Stock Ledger Entry": {
"columns": ["name", "posting_date", "posting_time", "item_code", "warehouse",
"actual_qty as qty", "voucher_type", "voucher_no", "project",
"ifnull(incoming_rate,0) as incoming_rate", "stock_uom", "serial_no",
"qty_after_transaction", "valuation_rate"],
"order_by": "posting_date, posting_time, name",
"links": {
"item_code": ["Item", "name"],
"warehouse": ["Warehouse", "name"],
"project": ["Project", "name"]
},
"force_index": "posting_sort_index"
},
"Serial No": {
"columns": ["name", "purchase_rate as incoming_rate"],
"conditions": ["docstatus < 2"],
"order_by": "name"
},
"Stock Entry": {
"columns": ["name", "purpose"],
"conditions": ["docstatus=1"],
"order_by": "posting_date, posting_time, name",
},
"Production Order": {
"columns": ["name", "production_item as item_code",
"(ifnull(qty, 0) - ifnull(produced_qty, 0)) as qty",
"fg_warehouse as warehouse"],
"conditions": ["docstatus=1", "status != 'Stopped'", "ifnull(fg_warehouse, '')!=''",
"ifnull(qty, 0) > ifnull(produced_qty, 0)"],
"links": {
"item_code": ["Item", "name"],
"warehouse": ["Warehouse", "name"]
},
},
"Material Request Item": {
"columns": ["item.name as name", "item_code", "warehouse",
"(ifnull(qty, 0) - ifnull(ordered_qty, 0)) as qty"],
"from": "`tabMaterial Request Item` item, `tabMaterial Request` main",
"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
"ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(ordered_qty, 0)"],
"links": {
"item_code": ["Item", "name"],
"warehouse": ["Warehouse", "name"]
},
},
"Purchase Order Item": {
"columns": ["item.name as name", "item_code", "warehouse",
"(ifnull(qty, 0) - ifnull(received_qty, 0)) as qty"],
"from": "`tabPurchase Order Item` item, `tabPurchase Order` main",
"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
"ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(received_qty, 0)"],
"links": {
"item_code": ["Item", "name"],
"warehouse": ["Warehouse", "name"]
},
},
"Sales Order Item": {
"columns": ["item.name as name", "item_code", "(ifnull(qty, 0) - ifnull(delivered_qty, 0)) as qty", "warehouse"],
"from": "`tabSales Order Item` item, `tabSales Order` main",
"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
"ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(delivered_qty, 0)"],
"links": {
"item_code": ["Item", "name"],
"warehouse": ["Warehouse", "name"]
},
},
# Sales
"Customer": {
"columns": ["name", "if(customer_name=name, '', customer_name) as customer_name",
"customer_group as parent_customer_group", "territory as parent_territory"],
"conditions": ["docstatus < 2"],
"order_by": "name",
"links": {
"parent_customer_group": ["Customer Group", "name"],
"parent_territory": ["Territory", "name"],
}
},
"Customer Group": {
"columns": ["name", "parent_customer_group"],
"conditions": ["docstatus < 2"],
"order_by": "lft"
},
"Territory": {
"columns": ["name", "parent_territory"],
"conditions": ["docstatus < 2"],
"order_by": "lft"
},
"Sales Invoice": {
"columns": ["name", "customer", "posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "posting_date",
"links": {
"customer": ["Customer", "name"],
"company":["Company", "name"]
}
},
"Sales Invoice Item": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Sales Invoice", "name"],
"item_code": ["Item", "name"]
}
},
"Sales Order": {
"columns": ["name", "customer", "transaction_date as posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "transaction_date",
"links": {
"customer": ["Customer", "name"],
"company":["Company", "name"]
}
},
"Sales Order Item[Sales Analytics]": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Sales Order", "name"],
"item_code": ["Item", "name"]
}
},
"Delivery Note": {
"columns": ["name", "customer", "posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "posting_date",
"links": {
"customer": ["Customer", "name"],
"company":["Company", "name"]
}
},
"Delivery Note Item[Sales Analytics]": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Delivery Note", "name"],
"item_code": ["Item", "name"]
}
},
"Supplier": {
"columns": ["name", "if(supplier_name=name, '', supplier_name) as supplier_name",
"supplier_type as parent_supplier_type"],
"conditions": ["docstatus < 2"],
"order_by": "name",
"links": {
"parent_supplier_type": ["Supplier Type", "name"],
}
},
"Supplier Type": {
"columns": ["name"],
"conditions": ["docstatus < 2"],
"order_by": "name"
},
"Purchase Invoice": {
"columns": ["name", "supplier", "posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "posting_date",
"links": {
"supplier": ["Supplier", "name"],
"company":["Company", "name"]
}
},
"Purchase Invoice Item": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Purchase Invoice", "name"],
"item_code": ["Item", "name"]
}
},
"Purchase Order": {
"columns": ["name", "supplier", "transaction_date as posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "posting_date",
"links": {
"supplier": ["Supplier", "name"],
"company":["Company", "name"]
}
},
"Purchase Order Item[Purchase Analytics]": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Purchase Order", "name"],
"item_code": ["Item", "name"]
}
},
"Purchase Receipt": {
"columns": ["name", "supplier", "posting_date", "company"],
"conditions": ["docstatus=1"],
"order_by": "posting_date",
"links": {
"supplier": ["Supplier", "name"],
"company":["Company", "name"]
}
},
"Purchase Receipt Item[Purchase Analytics]": {
"columns": ["name", "parent", "item_code", "qty", "base_amount"],
"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
"order_by": "parent",
"links": {
"parent": ["Purchase Receipt", "name"],
"item_code": ["Item", "name"]
}
},
# Support
"Support Ticket": {
"columns": ["name","status","creation","resolution_date","first_responded_on"],
"conditions": ["docstatus < 2"],
"order_by": "creation"
}
}
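# Minimal sketch (a hypothetical helper, not part of this module) showing how
# a consumer might turn one of the mappings above into a SELECT statement:
def build_select(name):
	spec = data_map[name]
	# keys like "Sales Order Item[Sales Analytics]" map to `tabSales Order Item`
	source = spec.get("from") or "`tab%s`" % name.split("[")[0]
	query = "select %s from %s" % (", ".join(spec["columns"]), source)
	if spec.get("conditions"):
		query += " where " + " and ".join(spec["conditions"])
	if spec.get("order_by"):
		query += " order by " + spec["order_by"]
	return query

# e.g. build_select("Company") -> "select name from `tabCompany` where docstatus < 2"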
| agpl-3.0 | 8,439,381,569,199,534,000 | 28.956081 | 115 | 0.570542 | false |
tovmeod/anaf | anaf/knowledge/forms.py | 1 | 4996 | """
Knowledge base model forms
"""
from django.forms import ModelForm, Form, ChoiceField
from models import KnowledgeFolder, KnowledgeItem, KnowledgeCategory
from anaf.core.models import Object
from anaf.core.decorators import preprocess_form
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
preprocess_form()
class MassActionForm(Form):
""" Mass action form for Reports """
delete = ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
self.fields['delete'] = ChoiceField(label=_("Delete"), choices=(('', '-----'),
('delete', _(
'Delete Completely')),
('trash', _('Move to Trash'))), required=False)
def save(self, *args, **kwargs):
"Process form"
if self.instance and self.is_valid() and self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class KnowledgeFolderForm(ModelForm):
""" Knowledge folder form """
def __init__(self, user, knowledgeType_id, *args, **kwargs):
super(KnowledgeFolderForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['parent'].label = _("Parent")
        # Restrict parent choices to folders the user is permitted to access.
        self.fields['parent'].queryset = Object.filter_permitted(
            user, KnowledgeFolder.objects, mode='x')
if knowledgeType_id:
self.fields['parent'].initial = knowledgeType_id
self.fields['details'].label = _("Details")
class Meta:
"KnowledgeFolder"
model = KnowledgeFolder
fields = ('name', 'parent', 'details')
class KnowledgeItemForm(ModelForm):
""" Knowledge item form """
def __init__(self, user, knowledgeType_id, *args, **kwargs):
super(KnowledgeItemForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['folder'].label = _("Folder")
self.fields['folder'].queryset = Object.filter_permitted(
user, KnowledgeFolder.objects, mode='x')
self.fields['folder'].widget.attrs.update(
{'popuplink': reverse('knowledge_folder_add')})
if knowledgeType_id:
self.fields['folder'].initial = knowledgeType_id
self.fields['category'].label = _("Category")
self.fields['category'].queryset = Object.filter_permitted(
user, KnowledgeCategory.objects, mode='x')
self.fields['category'].widget.attrs.update(
{'popuplink': reverse('knowledge_category_add')})
self.fields['body'].label = _("Body")
self.fields['body'].widget.attrs.update({'class': 'full-editor'})
class Meta:
"KnowledgeItem"
model = KnowledgeItem
fields = ('name', 'folder', 'category', 'body')
class KnowledgeCategoryForm(ModelForm):
""" Knowledge category form """
def __init__(self, *args, **kwargs):
super(KnowledgeCategoryForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['details'].label = _("Details")
class Meta:
"KnowledgeCategory"
model = KnowledgeCategory
fields = ('name', 'details')
class FilterForm(ModelForm):
""" Filter form definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(FilterForm, self).__init__(*args, **kwargs)
if 'folder' in skip:
del self.fields['folder']
else:
self.fields['folder'].queryset = Object.filter_permitted(
user, KnowledgeFolder.objects, mode='x')
# self.fields['folder'].required = False
self.fields['folder'].label = _("Folder")
if 'category' in skip:
del self.fields['category']
else:
self.fields['category'].queryset = Object.filter_permitted(user,
KnowledgeCategory.objects, mode='x')
self.fields['category'].required = False
self.fields['category'].label = _("Category")
class Meta:
"Filter"
model = KnowledgeItem
fields = ('folder', 'category')
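# Illustrative usage from a view (hypothetical; not part of this module):
#   form = KnowledgeItemForm(request.user, folder_id, request.POST or None)
#   if form.is_valid():
#       item = form.save()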
| bsd-3-clause | 9,082,211,403,079,981,000 | 34.183099 | 119 | 0.555845 | false |
newemailjdm/scipy | scipy/weave/examples/object.py | 100 | 1679 | """ Attribute and method access on Python objects from C++.
Note: std::cout type operations currently crash python...
Not sure what is up with this...
"""
from __future__ import absolute_import, print_function
import scipy.weave as weave
#----------------------------------------------------------------------------
# get/set attribute and call methods example
#----------------------------------------------------------------------------
class Foo(object):
def __init__(self):
self.val = 1
def inc(self,amount):
self.val += amount
return self.val
obj = Foo()
code = """
py::tuple result(3);
int i = obj.attr("val");
result[0] = i;
py::tuple args(1);
args[0] = 2;
i = obj.mcall("inc",args);
result[1] = i;
obj.set_attr("val",5);
i = obj.attr("val");
result[2] = i;
return_val = result;
"""
print('initial, inc(2), set(5)/get:', weave.inline(code,['obj']))
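# Expected output, assuming weave compiles the snippet successfully:
#   initial, inc(2), set(5)/get: (1, 3, 5)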
#----------------------------------------------------------------------------
# indexing of values.
#----------------------------------------------------------------------------
from UserList import UserList
obj = UserList([1,[1,2],"hello"])
code = """
int i;
// find obj length and access each of its items
//std::cout << "UserList items: ";
//for(i = 0; i < obj.length(); i++)
// std::cout << obj[i].str() << " ";
//std::cout << std::endl;
// assign new values to each of its items
for(i = 0; i < obj.length(); i++)
obj[i] = "goodbye";
"""
weave.inline(code,['obj'])
print("obj with new values:", obj)
| bsd-3-clause | 3,781,669,792,563,363,300 | 26.983333 | 77 | 0.437761 | false |
salfab/CouchPotatoServer | libs/elixir/ext/perform_ddl.py | 29 | 3315 | '''
DDL statements for Elixir.
Entities having the perform_ddl statement will automatically execute the
given DDL statement at the given moment: either before or after the table
creation in SQL.
The 'when' argument can be either 'before-create' or 'after-create'.
The 'statement' argument can be one of:
- a single string statement
- a list of string statements, in which case, each of them will be executed
in turn.
- a callable which should take no argument and return either a single string
or a list of strings.
In each string statement, you may use the special '%(fullname)s' construct,
which will be replaced with the real table name (including the schema), in
case it is unknown to you. The self-explanatory '%(table)s' and '%(schema)s'
may also be used here.
You would use this extension to handle non-Elixir SQL statements, like
triggers, etc.
.. sourcecode:: python
class Movie(Entity):
title = Field(Unicode(30), primary_key=True)
year = Field(Integer)
perform_ddl('after-create',
"insert into %(fullname)s values ('Alien', 1979)")
preload_data is a more specific statement meant to preload data in your
entity table from a list of tuples (of field values for each row).
.. sourcecode:: python
class Movie(Entity):
title = Field(Unicode(30), primary_key=True)
year = Field(Integer)
preload_data(('title', 'year'),
[(u'Alien', 1979), (u'Star Wars', 1977)])
preload_data(('year', 'title'),
[(1982, u'Blade Runner')])
preload_data(data=[(u'Batman', 1966)])
'''
from elixir.statements import Statement
from elixir.properties import EntityBuilder
from sqlalchemy import DDL
__all__ = ['perform_ddl', 'preload_data']
__doc_all__ = []
#
# the perform_ddl statement
#
class PerformDDLEntityBuilder(EntityBuilder):
def __init__(self, entity, when, statement, on=None, context=None):
self.entity = entity
self.when = when
self.statement = statement
self.on = on
self.context = context
def after_table(self):
statement = self.statement
if hasattr(statement, '__call__'):
statement = statement()
if not isinstance(statement, list):
statement = [statement]
for s in statement:
ddl = DDL(s, self.on, self.context)
ddl.execute_at(self.when, self.entity.table)
perform_ddl = Statement(PerformDDLEntityBuilder)
#
# the preload_data statement
#
class PreloadDataEntityBuilder(EntityBuilder):
def __init__(self, entity, columns=None, data=None):
self.entity = entity
self.columns = columns
self.data = data
def after_table(self):
all_columns = [col.name for col in self.entity.table.columns]
def onload(event, schema_item, connection):
columns = self.columns
if columns is None:
columns = all_columns
data = self.data
if hasattr(data, '__call__'):
data = data()
insert = schema_item.insert()
connection.execute(insert,
[dict(zip(columns, values)) for values in data])
self.entity.table.append_ddl_listener('after-create', onload)
preload_data = Statement(PreloadDataEntityBuilder)
| gpl-3.0 | -8,847,765,805,570,421,000 | 30.273585 | 77 | 0.643137 | false |
googleapis/python-talent | google/cloud/talent_v4/services/tenant_service/transports/base.py | 1 | 9826 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.talent_v4.types import tenant
from google.cloud.talent_v4.types import tenant as gct_tenant
from google.cloud.talent_v4.types import tenant_service
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class TenantServiceTransport(abc.ABC):
"""Abstract transport class for TenantService."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
)
DEFAULT_HOST: str = "jobs.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_tenant: gapic_v1.method.wrap_method(
self.create_tenant, default_timeout=30.0, client_info=client_info,
),
self.get_tenant: gapic_v1.method.wrap_method(
self.get_tenant,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.update_tenant: gapic_v1.method.wrap_method(
self.update_tenant, default_timeout=30.0, client_info=client_info,
),
self.delete_tenant: gapic_v1.method.wrap_method(
self.delete_tenant,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_tenants: gapic_v1.method.wrap_method(
self.list_tenants,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
}
@property
def create_tenant(
self,
) -> Callable[
[tenant_service.CreateTenantRequest],
Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]],
]:
raise NotImplementedError()
@property
def get_tenant(
self,
) -> Callable[
[tenant_service.GetTenantRequest],
Union[tenant.Tenant, Awaitable[tenant.Tenant]],
]:
raise NotImplementedError()
@property
def update_tenant(
self,
) -> Callable[
[tenant_service.UpdateTenantRequest],
Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]],
]:
raise NotImplementedError()
@property
def delete_tenant(
self,
) -> Callable[
[tenant_service.DeleteTenantRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_tenants(
self,
) -> Callable[
[tenant_service.ListTenantsRequest],
Union[
tenant_service.ListTenantsResponse,
Awaitable[tenant_service.ListTenantsResponse],
],
]:
raise NotImplementedError()
__all__ = ("TenantServiceTransport",)
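# Hedged illustration: with google-auth >= 1.25.0 installed, the helper above
# returns both the user-supplied and the default scopes (per AUTH_SCOPES).
def _scopes_kwargs_sketch():
    return TenantServiceTransport._get_scopes_kwargs(
        "jobs.googleapis.com:443", None
    )  # -> {"scopes": None, "default_scopes": TenantServiceTransport.AUTH_SCOPES}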
| apache-2.0 | -8,748,073,559,244,546,000 | 36.361217 | 103 | 0.602687 | false |
thedep2/CouchPotatoServer | libs/guessit/transfo/__init__.py | 94 | 4117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import base_text_type, Guess
from guessit.patterns import canonical_form
from guessit.textutils import clean_string
import logging
log = logging.getLogger(__name__)
def found_property(node, name, confidence):
node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value)
log.debug('Found with confidence %.2f: %s' % (confidence, node.guess))
def format_guess(guess):
"""Format all the found values to their natural type.
For instance, a year would be stored as an int value, etc...
Note that this modifies the dictionary given as input.
"""
for prop, value in guess.items():
if prop in ('season', 'episodeNumber', 'year', 'cdNumber',
'cdNumberTotal', 'bonusNumber', 'filmNumber'):
guess[prop] = int(guess[prop])
elif isinstance(value, base_text_type):
if prop in ('edition',):
value = clean_string(value)
guess[prop] = canonical_form(value).replace('\\', '')
return guess
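# Hedged illustration of format_guess (the property names and values below are
# assumptions; the integer coercion follows the function above):
def _format_guess_sketch():
    g = Guess({'season': '2', 'episodeNumber': '05'}, confidence=1.0)
    return format_guess(g)  # -> {'season': 2, 'episodeNumber': 5}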
def find_and_split_node(node, strategy, logger):
string = ' %s ' % node.value # add sentinels
for matcher, confidence, args, kwargs in strategy:
all_args = [string]
if getattr(matcher, 'use_node', False):
all_args.append(node)
if args:
all_args.append(args)
if kwargs:
result, span = matcher(*all_args, **kwargs)
else:
result, span = matcher(*all_args)
if result:
# readjust span to compensate for sentinels
span = (span[0] - 1, span[1] - 1)
if isinstance(result, Guess):
if confidence is None:
confidence = result.confidence(list(result.keys())[0])
else:
if confidence is None:
confidence = 1.0
guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1]))
msg = 'Found with confidence %.2f: %s' % (confidence, guess)
(logger or log).debug(msg)
node.partition(span)
absolute_span = (span[0] + node.offset, span[1] + node.offset)
for child in node.children:
if child.span == absolute_span:
child.guess = guess
else:
find_and_split_node(child, strategy, logger)
return
class SingleNodeGuesser(object):
def __init__(self, guess_func, confidence, logger, *args, **kwargs):
self.guess_func = guess_func
self.confidence = confidence
self.logger = logger
self.args = args
self.kwargs = kwargs
def process(self, mtree):
# strategy is a list of pairs (guesser, confidence)
# - if the guesser returns a guessit.Guess and confidence is specified,
# it will override it, otherwise it will leave the guess confidence
# - if the guesser returns a simple dict as a guess and confidence is
# specified, it will use it, or 1.0 otherwise
strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ]
for node in mtree.unidentified_leaves():
find_and_split_node(node, strategy, self.logger)
| gpl-3.0 | 8,548,563,577,860,957,000 | 36.770642 | 107 | 0.624727 | false |
freedesktop-unofficial-mirror/papyon | papyon/service/ContentRoaming/scenario/store_profile.py | 6 | 3579 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from base import *
from papyon.util.async import *
__all__ = ['StoreProfileScenario']
class StoreProfileScenario(BaseScenario):
def __init__(self, storage, callback, errback,
cid, profile_id, expression_profile_id, display_picture_id,
display_name='', personal_message='', display_picture=''):
"""Updates the roaming profile stored on the server
@param storage: the storage service
@param callback: tuple(callable, *args)
@param errback: tuple(callable, *args)
"""
BaseScenario.__init__(self, 'RoamingIdentityChanged', callback, errback)
self.__storage = storage
self.__cid = cid
self.__profile_id = profile_id
self.__expression_profile_id = expression_profile_id
self.__display_picture_id = display_picture_id
self.display_name = display_name
self.personal_message = personal_message
self.display_picture = display_picture
def execute(self):
self.__storage.UpdateProfile((self.__update_profile_callback,),
self._errback, self._scenario,
self.__profile_id, self.display_name,
self.personal_message, 0)
def __update_profile_callback(self):
if not self.display_picture or not self.__display_picture_id:
run(self._callback)
elif not self.__cid:
self.__delete_relationship_profile_callback()
else:
self.__storage.DeleteRelationships(
(self.__delete_relationship_profile_callback,),
self._errback,
self._scenario,
self.__display_picture_id,
self.__cid, None)
def __delete_relationship_profile_callback(self):
if not self.__expression_profile_id:
self.__delete_relationship_expression_callback()
else:
self.__storage.DeleteRelationships(
(self.__delete_relationship_expression_callback,),
self._errback, self._scenario, self.__display_picture_id,
None, self.__expression_profile_id)
def __delete_relationship_expression_callback(self):
# FIXME : add support for dp name
self.__storage.CreateDocument(
(self.__create_document_callback,), self._errback,
self._scenario, self.__cid,
"roaming", self.display_picture[0],
self.display_picture[1].encode('base64'))
def __create_document_callback(self, document_rid):
self.__storage.CreateRelationships(self._callback, self._errback,
self._scenario, self.__expression_profile_id, document_rid)
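# Hedged usage sketch: callbacks follow the (callable, *args) tuple convention
# documented above; all ids and handlers are placeholders.
def _store_profile_sketch(storage, cid, profile_id, expression_profile_id,
                          display_picture_id, on_stored, on_error):
    scenario = StoreProfileScenario(storage, (on_stored,), (on_error,),
                                    cid, profile_id, expression_profile_id,
                                    display_picture_id,
                                    display_name=u'Alice')
    scenario.execute()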
| gpl-2.0 | 472,551,720,358,592,400 | 41.105882 | 80 | 0.619447 | false |
google/deepvariant | deepvariant/model_train.py | 1 | 11333 | # Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Trains the DeepVariant model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
import json
import os
from absl import flags
from absl import logging
import tensorflow as tf
from third_party.nucleus.util import proto_utils
from deepvariant import data_providers
from deepvariant import logging_level
from deepvariant import modeling
from deepvariant import tf_utils
FLAGS = flags.FLAGS
# Data set selection parameters
flags.DEFINE_string('dataset_config_pbtxt', None,
'The path to the dataset config file.')
flags.DEFINE_string('model_name', 'inception_v3',
'The name of the model to use for predictions.')
flags.DEFINE_integer('batch_size', 4096, 'The number of samples in each batch.')
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project', None,
'Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', None,
'GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_name', None,
'Name of the Cloud TPU for Cluster Resolvers. You must specify either '
'this flag or --master.')
flags.DEFINE_string(
'master', None,
'GRPC URL of the master (e.g. grpc://ip.address.of.tpu:8470). You '
'must specify either this flag or --tpu_name.')
flags.DEFINE_string('train_dir', '/tmp/deepvariant/',
'Directory where to write event logs.')
flags.DEFINE_boolean('use_tpu', False, 'use tpu if available')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
flags.DEFINE_integer(
'ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.')
flags.DEFINE_integer('number_of_steps', 8000000,
'Maximum number of global steps to take when training.')
flags.DEFINE_integer(
'num_retries', 0,
'The number of times to retry on InternalError or UnavailableError.')
flags.DEFINE_integer(
'max_examples', None,
'The maximum number of examples to use in training. If None, all examples '
'will be used. If not None, the first max_examples examples from the '
'dataset will be used, with those same examples repeating over and over.')
# Pre-trained model parameters
flags.DEFINE_string(
'start_from_checkpoint', 'model_default',
'A path to a checkpoint of model weights to initialize our model at the '
'start of training. If None or "", the model will start from random weights'
'. The special value "model_default" will use the default pretrained '
'path for the selected model.')
flags.DEFINE_integer(
'max_checkpoints_to_keep', 10,
'Number of last checkpoints to keep during training. '
'Passing "0" preserves all checkpoints.')
flags.DEFINE_string(
'kmp_blocktime', '0',
'Value to set the KMP_BLOCKTIME environment variable to for efficient MKL '
'training. See https://www.tensorflow.org/performance/performance_guide '
'for more information. The default value is 0, which provides the best '
'performance in our tests. Set this flag to "" to not set the variable.')
flags.DEFINE_integer(
'random_seed', 400620758,
'Random seed value to use for TensorFlow. Providing a value != 0 will '
'result in a call to tf.set_random_seed(FLAGS.random_seed), making '
'training more deterministic. If set to 0, the TensorFlow random seed '
'will not be set at all, and TensorFlow will assign it a pseudo-random '
'value each time model_train is run.')
def loss(logits, one_hot_labels, label_smoothing):
"""Creates a loss function for training logits against one_hot_labels.
Args:
logits: tensor. logits of the model we want to train.
one_hot_labels: One-hot encoded truth labels that we want to train this
model to predict.
label_smoothing: float. label_smoothing value for softmax_cross_entropy.
Returns:
A `Tensor` whose value represents the total loss.
"""
tf.compat.v1.losses.softmax_cross_entropy(
logits, one_hot_labels, label_smoothing=label_smoothing, weights=1.0)
return tf.compat.v1.losses.get_total_loss()
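# Hedged illustration only; the batch size and class count are assumptions.
def _loss_sketch():
    logits = tf.random.normal([8, 3])
    one_hot_labels = tf.one_hot([0, 1, 2, 0, 1, 2, 0, 1], depth=3)
    return loss(logits, one_hot_labels, label_smoothing=0.01)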
def run(target, unused_is_chief, device_fn, use_tpu):
"""Run training.
Args:
target: The target of the TensorFlow standard server to use. Can be the
empty string to run locally using an inprocess server.
device_fn: Device function used to assign ops to devices.
use_tpu: turn on tpu code path.
"""
if not FLAGS.dataset_config_pbtxt:
logging.error('Need to specify --dataset_config_pbtxt')
return
g = tf.Graph()
with g.as_default():
with tf.device(device_fn):
# If ps_tasks is zero, the local device is used. When using multiple
# (non-local) replicas, the ReplicaDeviceSetter distributes the variables
# across the different devices.
tf_dataset = data_providers.get_input_fn_from_dataset(
dataset_config_filename=FLAGS.dataset_config_pbtxt,
mode=tf.estimator.ModeKeys.TRAIN,
max_examples=FLAGS.max_examples,
use_tpu=use_tpu)
model = modeling.get_model(FLAGS.model_name)
logging.info('Running training on %s with model %s and tpu %s',
tf_dataset, FLAGS.model_name, use_tpu)
batches_per_epoch = tf_dataset.num_examples // FLAGS.batch_size
logging.info('Batches per epoch %s', batches_per_epoch)
params = dict(batches_per_epoch=batches_per_epoch,)
estimator = model.make_estimator(
batch_size=FLAGS.batch_size,
model_dir=FLAGS.train_dir,
params=params,
use_tpu=use_tpu,
master=target,
start_from_checkpoint=FLAGS.start_from_checkpoint,
)
estimator.train(
input_fn=tf_dataset, max_steps=FLAGS.number_of_steps, hooks=None)
def parse_and_run():
"""Parse TF_CONFIG to cluster_spec and call run().
TF_CONFIG environment variable is available when running using
gcloud either locally or on cloud. It has all the information required
to create a ClusterSpec which is important for running distributed code.
Raises:
ValueError: If flags are invalid.
"""
tf_config = os.environ.get('TF_CONFIG')
logging.info('TF_CONFIG %s', tf_config)
for name in ['master', 'task', 'ps_tasks']:
if getattr(FLAGS, name) and tf_config:
raise ValueError(
'Either the flag --%s or the environment variable TF_CONFIG can be'
' set but not both.' % name)
# redacted
#
# If TF_CONFIG is not available we are either running locally in Cloud
# or distributed inside Google. On Cloud the default values of
# FLAGS.master and FLAGS.task correspond to running training locally.
# Inside Google they will be set as needed to configure local or distributed
# training. Inside Google we don't need to explicitly set worker_device
# in replica_device_setter because this will be set automatically based
# on various flags.
if not tf_config:
device_fn = tf.compat.v1.train.replica_device_setter(FLAGS.ps_tasks)
# pylint: disable=g-long-ternary
master = tf_utils.resolve_master(FLAGS.master, FLAGS.tpu_name,
FLAGS.tpu_zone,
FLAGS.gcp_project) if FLAGS.use_tpu else ''
return run(
master, FLAGS.task == 0, device_fn=device_fn, use_tpu=FLAGS.use_tpu)
tf_config_json = json.loads(tf_config)
cluster = tf_config_json.get('cluster')
job_name = tf_config_json.get('task', {}).get('type')
task_index = tf_config_json.get('task', {}).get('index')
# If cluster information is empty run local
if job_name is None or task_index is None:
device_fn = tf.compat.v1.train.replica_device_setter(0)
return run('', True, device_fn=device_fn, use_tpu=FLAGS.use_tpu)
ps = cluster.get('ps', [])
num_ps = len(ps)
cluster_spec = tf.train.ClusterSpec(cluster)
server = tf.distribute.Server(
cluster_spec, job_name=job_name, task_index=task_index)
if job_name == 'ps':
server.join()
return
elif job_name in ['master', 'worker']:
device_fn = tf.compat.v1.train.replica_device_setter(
num_ps,
worker_device='/job:%s/task:%d' % (job_name, task_index),
cluster=cluster_spec)
return run(
server.target,
job_name == 'master',
device_fn=device_fn,
use_tpu=FLAGS.use_tpu)
def main(_):
"""Run and handle retryable errors."""
proto_utils.uses_fast_cpp_protos_or_die()
logging_level.set_from_flag()
if FLAGS.random_seed:
logging.info('Setting tf.random_seed to %d', FLAGS.random_seed)
tf.compat.v1.set_random_seed(FLAGS.random_seed)
else:
logging.info('Not setting tf.random_seed, will be assigned a random value')
if FLAGS.kmp_blocktime:
os.environ['KMP_BLOCKTIME'] = FLAGS.kmp_blocktime
logging.info('Set KMP_BLOCKTIME to %s', os.environ['KMP_BLOCKTIME'])
for _ in range(FLAGS.num_retries + 1):
try:
parse_and_run()
return
except tf.errors.UnavailableError as e:
# An UnavailableError indicates a gRPC error, typically this is
# retryable.
logging.error('Caught UnavailableError %s; will retry.', e)
except tf.errors.InternalError as e:
# Retry on an InternalError.
logging.error('Caught InternalError %s; will retry.', e)
if __name__ == '__main__':
tf.compat.v1.app.run()
| bsd-3-clause | 9,046,718,908,291,514,000 | 36.40264 | 80 | 0.695756 | false |
avlach/univbris-ocf | optin_manager/src/python/openflow/optin_manager/sfa/rspecs/.oldversions/sfav1.py | 2 | 8905 | from copy import deepcopy
from lxml import etree
from openflow.optin_manager.sfa.util.sfalogging import logger
from openflow.optin_manager.sfa.util.xrn import hrn_to_urn, urn_to_hrn
from openflow.optin_manager.sfa.rspecs.version import RSpecVersion
from openflow.optin_manager.sfa.rspecs.elements.element import Element
from openflow.optin_manager.sfa.rspecs.elements.versions.pgv2Link import PGv2Link
from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Node import SFAv1Node
from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver
from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
class SFAv1(RSpecVersion):
enabled = True
type = 'SFA'
content_type = '*'
version = '1'
schema = None
namespace = None
extensions = {}
namespaces = None
template = '<RSpec type="%s"></RSpec>' % type
# Network
def get_networks(self):
network_elems = self.xml.xpath('//network')
networks = [network_elem.get_instance(fields=['name', 'slice']) for \
network_elem in network_elems]
return networks
def add_network(self, network):
network_tags = self.xml.xpath('//network[@name="%s"]' % network)
if not network_tags:
network_tag = self.xml.add_element('network', name=network)
else:
network_tag = network_tags[0]
return network_tag
# Nodes
def get_nodes(self, filter=None):
return SFAv1Node.get_nodes(self.xml, filter)
def get_nodes_with_slivers(self):
return SFAv1Node.get_nodes_with_slivers(self.xml)
def add_nodes(self, nodes, network = None, no_dupes=False):
SFAv1Node.add_nodes(self.xml, nodes)
def merge_node(self, source_node_tag, network, no_dupes=False):
# note: the original guard referenced an undefined 'node'; use the source
# tag's hostname attribute for the duplicate check instead
if no_dupes and self.get_node_element(source_node_tag.get('hostname')):
# node already exists
return
network_tag = self.add_network(network)
network_tag.append(deepcopy(source_node_tag))
# Slivers
def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
# add slice name to network tag
network_tags = self.xml.xpath('//network')
if network_tags:
network_tag = network_tags[0]
network_tag.set('slice', urn_to_hrn(sliver_urn)[0])
# add slivers
sliver = {'name':sliver_urn,
'pl_tags': attributes}
for hostname in hostnames:
if sliver_urn:
sliver['name'] = sliver_urn
node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
if not node_elems:
continue
node_elem = node_elems[0]
SFAv1Sliver.add_slivers(node_elem.element, sliver)
# remove all nodes without slivers
if not append:
for node_elem in self.get_nodes():
if not node_elem['slivers']:
parent = node_elem.element.getparent()
parent.remove(node_elem.element)
def remove_slivers(self, slivers, network=None, no_dupes=False):
SFAv1Node.remove_slivers(self.xml, slivers)
def get_slice_attributes(self, network=None):
attributes = []
nodes_with_slivers = self.get_nodes_with_slivers()
for default_attribute in self.get_default_sliver_attributes(network):
attribute = default_attribute.copy()
attribute['node_id'] = None
attributes.append(attribute)
for node in nodes_with_slivers:
nodename=node['component_name']
sliver_attributes = self.get_sliver_attributes(nodename, network)
for sliver_attribute in sliver_attributes:
sliver_attribute['node_id'] = nodename
attributes.append(sliver_attribute)
return attributes
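# Illustrative return shape (attribute names are assumptions; 'node_id' is
# None for slice-wide defaults and a hostname for per-node attributes):
#   [{'name': 'initscript', 'value': '...', 'node_id': None},
#    {'name': 'initscript', 'value': '...', 'node_id': 'node1.example.net'}]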
def add_sliver_attribute(self, component_id, name, value, network=None):
nodes = self.get_nodes({'component_id': '*%s*' % component_id})
if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
node = nodes[0]
slivers = SFAv1Sliver.get_slivers(node)
if slivers:
sliver = slivers[0]
SFAv1Sliver.add_sliver_attribute(sliver, name, value)
else:
# should this be an assert / raise an exception?
logger.error("WARNING: failed to find component_id %s" % component_id)
def get_sliver_attributes(self, component_id, network=None):
nodes = self.get_nodes({'component_id': '*%s*' % component_id})
attribs = []
if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
node = nodes[0]
slivers = SFAv1Sliver.get_slivers(node.element)
if slivers is not None and isinstance(slivers, list) and len(slivers) > 0:
sliver = slivers[0]
attribs = SFAv1Sliver.get_sliver_attributes(sliver.element)
return attribs
def remove_sliver_attribute(self, component_id, name, value, network=None):
attribs = self.get_sliver_attributes(component_id)
for attrib in attribs:
if attrib['name'] == name and attrib['value'] == value:
#attrib.element.delete()
parent = attrib.element.getparent()
parent.remove(attrib.element)
def add_default_sliver_attribute(self, name, value, network=None):
if network:
defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
else:
defaults = self.xml.xpath("//sliver_defaults")
if not defaults:
if network:
network_tag = self.xml.xpath("//network[@name='%s']" % network)
else:
network_tag = self.xml.xpath("//network")
if isinstance(network_tag, list):
network_tag = network_tag[0]
defaults = network_tag.add_element('sliver_defaults')
elif isinstance(defaults, list):
defaults = defaults[0]
SFAv1Sliver.add_sliver_attribute(defaults, name, value)
def get_default_sliver_attributes(self, network=None):
if network:
defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
else:
defaults = self.xml.xpath("//sliver_defaults")
if not defaults: return []
return SFAv1Sliver.get_sliver_attributes(defaults[0])
def remove_default_sliver_attribute(self, name, value, network=None):
attribs = self.get_default_sliver_attributes(network)
for attrib in attribs:
if attrib['name'] == name and attrib['value'] == value:
#attrib.element.delete()
parent = attrib.element.getparent()
parent.remove(attrib.element)
# Links
def get_links(self, network=None):
return PGv2Link.get_links(self.xml)
def get_link_requests(self):
return PGv2Link.get_link_requests(self.xml)
def add_links(self, links):
networks = self.get_networks()
if len(networks) > 0:
xml = networks[0].element
else:
xml = self.xml
PGv2Link.add_links(xml, links)
def add_link_requests(self, links):
PGv2Link.add_link_requests(self.xml, links)
# utility
def merge(self, in_rspec):
"""
Merge contents for specified rspec with current rspec
"""
if not in_rspec:
return
from openflow.optin_manager.sfa.rspecs.rspec import RSpec
if isinstance(in_rspec, RSpec):
rspec = in_rspec
else:
rspec = RSpec(in_rspec)
if rspec.version.type.lower() == 'protogeni':
from openflow.optin_manager.sfa.rspecs.rspec_converter import RSpecConverter
in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
rspec = RSpec(in_rspec)
# just copy over all networks
current_networks = self.get_networks()
networks = rspec.version.get_networks()
for network in networks:
current_network = network.get('name')
if current_network and current_network not in current_networks:
self.xml.append(network.element)
current_networks.append(current_network)
# Leases
def get_leases(self, filter=None):
return SFAv1Lease.get_leases(self.xml, filter)
def add_leases(self, leases, network = None, no_dupes=False):
SFAv1Lease.add_leases(self.xml, leases)
if __name__ == '__main__':
from openflow.optin_manager.sfa.rspecs.rspec import RSpec
from openflow.optin_manager.sfa.rspecs.rspec_elements import *
r = RSpec('/tmp/resources.rspec')
r.load_rspec_elements(SFAv1.elements)
print r.get(RSpecElements.NODE)
| bsd-3-clause | 6,670,541,927,113,774,000 | 37.218884 | 88 | 0.612802 | false |
popazerty/openblackhole-SH4 | lib/python/Screens/ServiceScan.py | 6 | 4074 | import Screens.InfoBar
from enigma import eServiceReference
from Screens.Screen import Screen
from Components.ServiceScan import ServiceScan as CScan
from Components.ProgressBar import ProgressBar
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.FIFOList import FIFOList
from Components.Sources.FrontendInfo import FrontendInfo
from Components.config import config
class ServiceScanSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget name="Title" position="6,4" size="120,42" font="Regular;16" transparent="1" />
<widget name="scan_progress" position="6,50" zPosition="1" borderWidth="1" size="56,12" backgroundColor="dark" />
<widget name="Service" position="6,22" size="120,26" font="Regular;12" transparent="1" />
</screen>"""
def __init__(self, session, parent, showStepSlider = True):
Screen.__init__(self, session, parent)
self["Title"] = Label(parent.title or _("Service scan"))
self["Service"] = Label(_("No service"))
self["scan_progress"] = ProgressBar()
def updateProgress(self, value):
self["scan_progress"].setValue(value)
def updateService(self, name):
self["Service"].setText(name)
class ServiceScan(Screen):
def ok(self):
if self["scan"].isDone():
if self.currentInfobar.__class__.__name__ == "InfoBar":
selectedService = self["servicelist"].getCurrentSelection()
if selectedService and self.currentServiceList is not None:
self.currentServiceList.setTvMode()
bouquets = self.currentServiceList.getBouquetList()
last_scanned_bouquet = bouquets and next((x[1] for x in bouquets if x[0] == "Last Scanned"), None)
if last_scanned_bouquet:
self.currentServiceList.enterUserbouquet(last_scanned_bouquet)
self.currentServiceList.setCurrentSelection(eServiceReference(selectedService[1]))
service = self.currentServiceList.getCurrentSelection()
if not self.session.postScanService or service != self.session.postScanService:
self.session.postScanService = service
self.currentServiceList.addToHistory(service)
config.servicelist.lastmode.save()
self.currentServiceList.saveChannel(service)
self.doCloseRecursive()
self.cancel()
def cancel(self):
self.exit(False)
def doCloseRecursive(self):
self.exit(True)
def exit(self, returnValue):
if self.currentInfobar.__class__.__name__ == "InfoBar":
self.close(returnValue)
self.close()
def __init__(self, session, scanList):
Screen.__init__(self, session)
self["Title"] = Label(_("Scanning..."))
self.scanList = scanList
if hasattr(session, 'infobar'):
self.currentInfobar = Screens.InfoBar.InfoBar.instance
if self.currentInfobar:
self.currentServiceList = self.currentInfobar.servicelist
if self.session.pipshown and self.currentServiceList:
if self.currentServiceList.dopipzap:
self.currentServiceList.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
else:
self.currentInfobar = None
self.session.nav.stopService()
self["scan_progress"] = ProgressBar()
self["scan_state"] = Label(_("scan state"))
self["network"] = Label()
self["transponder"] = Label()
self["pass"] = Label("")
self["servicelist"] = FIFOList()
self["FrontendInfo"] = FrontendInfo()
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("OK"))
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"ok": self.ok,
"save": self.ok,
"cancel": self.cancel,
"menu": self.doCloseRecursive
}, -2)
self.setTitle("Service scan")
self.onFirstExecBegin.append(self.doServiceScan)
def doServiceScan(self):
self["servicelist"].len = self["servicelist"].instance.size().height() / self["servicelist"].l.getItemSize().height()
self["scan"] = CScan(self["scan_progress"], self["scan_state"], self["servicelist"], self["pass"], self.scanList, self["network"], self["transponder"], self["FrontendInfo"], self.session.summary)
def createSummary(self):
return ServiceScanSummary
| gpl-2.0 | -1,745,740,406,013,722,600 | 34.426087 | 197 | 0.712322 | false |
jabesq/home-assistant | tests/components/mobile_app/test_entity.py | 2 | 4044 | """Entity tests for mobile_app."""
# pylint: disable=redefined-outer-name,unused-import
import logging
_LOGGER = logging.getLogger(__name__)
async def test_sensor(hass, create_registrations, webhook_client): # noqa: F401, F811, E501
"""Test that sensors can be registered and updated."""
webhook_id = create_registrations[1]['webhook_id']
webhook_url = '/api/webhook/{}'.format(webhook_id)
reg_resp = await webhook_client.post(
webhook_url,
json={
'type': 'register_sensor',
'data': {
'attributes': {
'foo': 'bar'
},
'device_class': 'battery',
'icon': 'mdi:battery',
'name': 'Battery State',
'state': 100,
'type': 'sensor',
'unique_id': 'battery_state',
'unit_of_measurement': '%'
}
}
)
assert reg_resp.status == 201
json = await reg_resp.json()
assert json == {'success': True}
await hass.async_block_till_done()
entity = hass.states.get('sensor.battery_state')
assert entity is not None
assert entity.attributes['device_class'] == 'battery'
assert entity.attributes['icon'] == 'mdi:battery'
assert entity.attributes['unit_of_measurement'] == '%'
assert entity.attributes['foo'] == 'bar'
assert entity.domain == 'sensor'
assert entity.name == 'Battery State'
assert entity.state == '100'
update_resp = await webhook_client.post(
webhook_url,
json={
'type': 'update_sensor_states',
'data': [
{
'icon': 'mdi:battery-unknown',
'state': 123,
'type': 'sensor',
'unique_id': 'battery_state'
}
]
}
)
assert update_resp.status == 200
updated_entity = hass.states.get('sensor.battery_state')
assert updated_entity.state == '123'
async def test_sensor_must_register(hass, create_registrations, # noqa: F401, F811, E501
webhook_client): # noqa: F401, F811, E501
"""Test that sensors must be registered before updating."""
webhook_id = create_registrations[1]['webhook_id']
webhook_url = '/api/webhook/{}'.format(webhook_id)
resp = await webhook_client.post(
webhook_url,
json={
'type': 'update_sensor_states',
'data': [
{
'state': 123,
'type': 'sensor',
'unique_id': 'battery_state'
}
]
}
)
assert resp.status == 200
json = await resp.json()
assert json['battery_state']['success'] is False
assert json['battery_state']['error']['code'] == 'not_registered'
async def test_sensor_id_no_dupes(hass, create_registrations, # noqa: F401, F811, E501
webhook_client): # noqa: F401, F811, E501
"""Test that sensors must have a unique ID."""
webhook_id = create_registrations[1]['webhook_id']
webhook_url = '/api/webhook/{}'.format(webhook_id)
payload = {
'type': 'register_sensor',
'data': {
'attributes': {
'foo': 'bar'
},
'device_class': 'battery',
'icon': 'mdi:battery',
'name': 'Battery State',
'state': 100,
'type': 'sensor',
'unique_id': 'battery_state',
'unit_of_measurement': '%'
}
}
reg_resp = await webhook_client.post(webhook_url, json=payload)
assert reg_resp.status == 201
reg_json = await reg_resp.json()
assert reg_json == {'success': True}
dupe_resp = await webhook_client.post(webhook_url, json=payload)
assert dupe_resp.status == 409
dupe_json = await dupe_resp.json()
assert dupe_json['success'] is False
assert dupe_json['error']['code'] == 'duplicate_unique_id'
| apache-2.0 | -777,938,695,969,267,300 | 29.870229 | 92 | 0.527201 | false |
yodalee/servo | tests/wpt/web-platform-tests/encrypted-media/polyfill/make-polyfill-tests.py | 16 | 1180 | #!/usr/bin/python
import os, re, os.path, glob
head = re.compile( r"^(\s*</head>)", re.MULTILINE )
runtest = re.compile( r"runTest\(\s*(\S.*?)\s*\)", re.DOTALL )
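# e.g. the substitutions below rewrite
#     runTest( config )        ->  runTest( config, 'polyfill: ' )
# and insert the polyfill <script> tags immediately before '</head>'.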
scripts = '''
<!-- Polyfill files (NOTE: These are added by auto-generation script) -->
<script src=/encrypted-media/polyfill/chrome-polyfill.js></script>
<script src=/encrypted-media/polyfill/firefox-polyfill.js></script>
<script src=/encrypted-media/polyfill/edge-polyfill.js></script>
<script src=/encrypted-media/polyfill/clearkey-polyfill.js></script>'''
def process_file( infile, outfile ) :
with open( outfile, "w" ) as output :
with open( infile, "r" ) as input :
output.write( runtest.sub( r"runTest( \1, 'polyfill: ' )", head.sub( scripts + r"\1", input.read() ) ) )
if __name__ == '__main__' :
if (not os.getcwd().endswith('polyfill')) :
print "Please run from polyfill directory"
exit( 1 )
for infile in glob.glob( "../*.html" ) :
process_file( infile, os.path.basename( infile ) )
for infile in glob.glob( "../resources/*.html" ) :
process_file( infile, os.path.join( "resources", os.path.basename( infile ) ) ) | mpl-2.0 | 4,611,188,515,550,353,000 | 41.178571 | 116 | 0.620339 | false |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/test_sorting.py | 4 | 17560 | import pytest
from itertools import product
from collections import defaultdict
import warnings
from datetime import datetime
import numpy as np
from numpy import nan
import pandas as pd
from pandas.core import common as com
from pandas import DataFrame, MultiIndex, merge, concat, Series, compat
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.sorting import (is_int64_overflow_possible,
decons_group_index,
get_group_index,
nargsort,
lexsort_indexer,
safe_sort)
class TestSorting(object):
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com._asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = pd.DataFrame.from_dict({'a': values,
'b': values,
'c': values,
'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
arr = np.vstack((arr, arr[i])) # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
# GH 2785; due to a regression in NumPy1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError:
pytest.skip('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
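# (A concrete stability check, for illustration: with duplicate keys,
#  np.argsort([1, 1, 0], kind='mergesort') gives [2, 0, 1] -- the tied
#  elements keep their original relative order.)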
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge(object):
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
assert len(out) == len(left)
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notna(),
'right': out['right'].notna(),
'inner': out['left'].notna() & out['right'].notna(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == 'outer'
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_decons():
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
assert (np.array_equal(a, b))
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile(
[0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile(
[5, 1, 0, 2, 3, 0, 5, 4], 100)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)]
testit(label_list, shape)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype='object')
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
if compat.PY2 and not pd._np_version_under1p10:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
pytest.raises(TypeError, safe_sort, arr)
else:
pytest.raises(TypeError, safe_sort, arr)
def test_exceptions(self):
with tm.assert_raises_regex(TypeError,
"Only list-like objects are allowed"):
safe_sort(values=1)
with tm.assert_raises_regex(TypeError,
"Only list-like objects or None"):
safe_sort(values=[0, 1, 2], labels=1)
with tm.assert_raises_regex(ValueError,
"values should be unique"):
safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
| apache-2.0 | -3,648,030,989,463,786,000 | 39.091324 | 79 | 0.546185 | false |
zstyblik/infernal-twin | build/pillow/Tests/test_file_webp_metadata.py | 11 | 3033 | from helper import unittest, PillowTestCase
from PIL import Image
class TestFileWebpMetadata(PillowTestCase):
def setUp(self):
try:
from PIL import _webp
except ImportError:
self.skipTest('WebP support not installed')
return
if not _webp.HAVE_WEBPMUX:
self.skipTest('WebPMux support not installed')
def test_read_exif_metadata(self):
file_path = "Tests/images/flower.webp"
image = Image.open(file_path)
self.assertEqual(image.format, "WEBP")
exif_data = image.info.get("exif", None)
self.assertTrue(exif_data)
exif = image._getexif()
# camera make
self.assertEqual(exif[271], "Canon")
jpeg_image = Image.open('Tests/images/flower.jpg')
expected_exif = jpeg_image.info['exif']
self.assertEqual(exif_data, expected_exif)
def test_write_exif_metadata(self):
from io import BytesIO
file_path = "Tests/images/flower.jpg"
image = Image.open(file_path)
expected_exif = image.info['exif']
test_buffer = BytesIO()
image.save(test_buffer, "webp", exif=expected_exif)
test_buffer.seek(0)
webp_image = Image.open(test_buffer)
webp_exif = webp_image.info.get('exif', None)
self.assertTrue(webp_exif)
if webp_exif:
self.assertEqual(
webp_exif, expected_exif, "WebP EXIF didn't match")
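    def _demo_metadata_roundtrip(self):
        # Illustrative sketch, not part of the original suite: the generic
        # save-to-buffer / reopen pattern that the write tests above rely on.
        from io import BytesIO
        image = Image.open("Tests/images/flower.jpg")
        buffer = BytesIO()
        image.save(buffer, "webp", exif=image.info["exif"])
        buffer.seek(0)
        roundtripped = Image.open(buffer)
        # roundtripped.info["exif"] should match the original EXIF payload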
def test_read_icc_profile(self):
file_path = "Tests/images/flower2.webp"
image = Image.open(file_path)
self.assertEqual(image.format, "WEBP")
self.assertTrue(image.info.get("icc_profile", None))
icc = image.info['icc_profile']
jpeg_image = Image.open('Tests/images/flower2.jpg')
expected_icc = jpeg_image.info['icc_profile']
self.assertEqual(icc, expected_icc)
def test_write_icc_metadata(self):
from io import BytesIO
file_path = "Tests/images/flower2.jpg"
image = Image.open(file_path)
expected_icc_profile = image.info['icc_profile']
test_buffer = BytesIO()
image.save(test_buffer, "webp", icc_profile=expected_icc_profile)
test_buffer.seek(0)
webp_image = Image.open(test_buffer)
webp_icc_profile = webp_image.info.get('icc_profile', None)
self.assertTrue(webp_icc_profile)
if webp_icc_profile:
self.assertEqual(
webp_icc_profile, expected_icc_profile,
"Webp ICC didn't match")
def test_read_no_exif(self):
from io import BytesIO
file_path = "Tests/images/flower.jpg"
image = Image.open(file_path)
self.assertTrue('exif' in image.info)
test_buffer = BytesIO()
image.save(test_buffer, "webp")
test_buffer.seek(0)
webp_image = Image.open(test_buffer)
self.assertFalse(webp_image._getexif())
if __name__ == '__main__':
unittest.main()
# End of file
| gpl-3.0 | -1,037,100,791,134,725,800 | 25.605263 | 73 | 0.602044 | false |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_cameras/camera/_reference/_reference_optics_orthographic_zfar_znear/_reference_optics_orthographic_zfar_znear.py | 8 | 3605 | # Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
# Baseline
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# Superior
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests specific to the advanced badge.
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit | 5,521,097,287,295,862,000 | 49.069444 | 466 | 0.723717 | false |
Adel-Magebinary/odoo | addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,519,977,577,303,618,000 | 44.787879 | 167 | 0.630046 | false |
cl4rke/scikit-learn | sklearn/cluster/k_means_.py | 128 | 54694 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
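def _demo_k_init():  # pragma: no cover
    """Illustrative sketch, not in the original module: seeding with
    k-means++. The toy data and fixed seed below are assumptions."""
    rng = np.random.RandomState(0)
    X_demo = rng.rand(50, 2)
    seeds = _k_init(X_demo, 3, row_norms(X_demo, squared=True), rng)
    # seeds has shape (3, 2): three initial centers spread across the data
    return seeds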
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
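def _demo_tolerance():  # pragma: no cover
    """Illustrative sketch, not in the original module: the relative ``tol``
    is rescaled by the mean per-feature variance of the (assumed toy) data."""
    X_demo = np.random.RandomState(0).rand(100, 4)
    abs_tol = _tolerance(X_demo, 1e-4)
    # for dense input this equals np.mean(np.var(X_demo, axis=0)) * 1e-4
    return abs_tol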
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
    # subtract mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
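def _demo_k_means():  # pragma: no cover
    """Illustrative sketch, not in the original module: the functional API.
    The toy blob data and parameters are assumptions for the demo."""
    rng = np.random.RandomState(42)
    X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    centers, labels, inertia = k_means(X_demo, n_clusters=2, random_state=42)
    # centers: (2, 2) centroids; labels: (100,) assignments; inertia: float
    return centers, labels, inertia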
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
    # closest center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
if squared_norm(centers_old - centers) <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
    # easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
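def _demo_labels_inertia():  # pragma: no cover
    """Illustrative sketch, not in the original module: the E-step helper.
    The toy points and centers below are assumptions."""
    X_demo = np.array([[0., 0.], [1., 1.], [10., 10.]])
    centers = np.array([[0., 0.], [10., 10.]])
    labels, inertia = _labels_inertia(
        X_demo, row_norms(X_demo, squared=True), centers)
    # labels -> [0, 0, 1]; inertia -> 2.0 (squared distance of [1, 1] to [0, 0])
    return labels, inertia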
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
if len(centers) != k:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, k))
return centers
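def _demo_init_centroids():  # pragma: no cover
    """Illustrative sketch, not in the original module: comparing the
    'k-means++' and 'random' strategies on assumed toy data."""
    rng = np.random.RandomState(1)
    X_demo = rng.rand(30, 3)
    c_pp = _init_centroids(X_demo, 5, 'k-means++', random_state=rng,
                           x_squared_norms=row_norms(X_demo, squared=True))
    c_rand = _init_centroids(X_demo, 5, 'random', random_state=rng)
    # both arrays are (5, 3); 'k-means++' spreads seeds, 'random' samples rows
    return c_pp, c_rand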
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
if hasattr(init, '__array__'):
n_clusters = init.shape[0]
init = np.asarray(init, dtype=np.float64)
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
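def _demo_kmeans_estimator():  # pragma: no cover
    """Illustrative sketch, not in the original module: the estimator API on
    two assumed, well-separated toy blobs."""
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5])
    km = KMeans(n_clusters=2, random_state=0).fit(X_demo)
    new_labels = km.predict(X_demo[:3])        # closest-center indices
    dists = km.transform(X_demo[:3])           # (3, 2) distances to centers
    return km.cluster_centers_, new_labels, dists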
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float64, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
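def _demo_mini_batch_step():  # pragma: no cover
    """Illustrative sketch, not in the original module: one in-place minibatch
    update. All inputs below are assumptions for the demo."""
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 2)
    centers = X_demo[:3].copy()
    counts = np.zeros(3, dtype=np.int32)
    inertia, squared_diff = _mini_batch_step(
        X_demo, row_norms(X_demo, squared=True), centers, counts,
        old_center_buffer=np.zeros(2, np.double), compute_squared_diff=True,
        distances=np.zeros(20, dtype=np.float64))
    # centers and counts were updated in place; squared_diff tracks movement
    return inertia, squared_diff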
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
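def _demo_ewa_smoothing():  # pragma: no cover
    """Illustrative sketch, not in the original module: the exponentially
    weighted average update used above, on assumed inertia values."""
    batch_size, n_samples = 100, 10000
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    ewa = None
    for batch_inertia in [5.0, 4.0, 4.2, 3.9]:
        ewa = (batch_inertia if ewa is None
               else ewa * (1 - alpha) + batch_inertia * alpha)
    # ewa is a smoothed inertia estimate, robust to minibatch noise
    return ewa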
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of square distances of samples to their nearest
neighbor.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=np.float64)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
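def _demo_minibatch_kmeans():  # pragma: no cover
    """Illustrative sketch, not in the original module: incremental fitting
    over an assumed stream of data chunks."""
    rng = np.random.RandomState(0)
    mbk = MiniBatchKMeans(n_clusters=3, random_state=0)
    for _ in range(10):                 # simulate ten incoming chunks
        mbk.partial_fit(rng.rand(50, 2))
    labels = mbk.predict(rng.rand(5, 2))
    return mbk.cluster_centers_, labels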
| bsd-3-clause | 4,856,901,176,834,109,000 | 37.114286 | 79 | 0.611895 | false |
hidat/audio_pipeline | audio_pipeline/test/AudioFileTest.py | 1 | 4157 | import os
import unittest
import mutagen
from . import TestUtil
from .. import util
test_files = "audio_pipeline\\test\\test_files\\audio\\tag_test_files"
class TestAudioFileTags(TestUtil.TestUtilMixin):
def test_artist_name(self):
tag = self.format.album_artist(self.meta)
self.check_af_tag(tag, self.af.album_artist)
def test_mbid(self):
tag = self.format.mbid(self.meta)
self.check_af_tag(tag, self.af.mbid)
def test_album(self):
tag = self.format.album(self.meta)
self.check_af_tag(tag, self.af.album)
def test_release_date(self):
tag = self.format.release_date(self.meta)
self.check_af_tag(tag, self.af.release_date)
def test_title(self):
tag = self.format.title(self.meta)
self.check_af_tag(tag, self.af.title)
def test_artist(self):
tag = self.format.artist(self.meta)
self.check_af_tag(tag, self.af.artist)
def test_disc_num(self):
tag = self.format.disc_num(self.meta)
self.check_af_tag(tag, self.af.disc_num)
def test_track_num(self):
tag = self.format.track_num(self.meta)
self.check_af_tag(tag, self.af.track_num)
def test_length(self):
tag = self.format.length(self.meta)
self.check_af_tag(tag, self.af.length)
def test_custom_release(self):
for tag_name in self.af.custom_release_tags.keys():
tag = self.format.custom_tag(tag_name, self.meta)
self.check_af_tag(tag, self.af.custom_release_tags[tag_name])
def test_custom_track(self):
for tag_name in self.af.custom_track_tags.keys():
tag = self.format.custom_tag(tag_name, self.meta)
self.check_af_tag(tag, self.af.custom_track_tags[tag_name])
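# Illustrative sketch, not part of the original file: every concrete case
# below binds one (test file, tag format) pair onto the shared checks in
# TestAudioFileTags. A hypothetical new container would follow the same
# pattern (the "NewFormat" names here are assumptions, not real modules):
#
# class TestAudioFileNewFormat(TestAudioFileTags, unittest.TestCase):
#     meta = mutagen.File(os.path.join(test_files, "t1.new"))
#     format = util.format.NewFormat.Format
#     af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.new"))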
class TestAudioFileVorbis_t1(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "t1.flac"))
format = util.format.Vorbis.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.flac"))
class TestAudioFileVorbis_picard(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "picard.flac"))
format = util.format.Vorbis.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.flac"))
class TestAudioFileVorbis_unknown(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "unknown.flac"))
format = util.format.Vorbis.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.flac"))
class TestAudioFileAAC_t1(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "t1.m4a"))
format = util.format.AAC.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.m4a"))
class TestAudioFileAAC_picard(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "picard.m4a"))
format = util.format.AAC.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.m4a"))
class TestAudioFileAAC_unknown(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "unknown.m4a"))
format = util.format.AAC.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.m4a"))
class TestAudioFileID3_t1(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "t1.mp3"))
format = util.format.ID3.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.mp3"))
class TestAudioFileID3_picard(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "picard.mp3"))
format = util.format.ID3.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.mp3"))
class TestAudioFileID3_unknown(TestAudioFileTags, unittest.TestCase):
meta = mutagen.File(os.path.join(test_files, "unknown.mp3"))
format = util.format.ID3.Format
af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.mp3")) | mit | -7,871,462,770,664,625,000 | 31.484375 | 79 | 0.670195 | false |
liikGit/MissionPlanner | Lib/site-packages/numpy/lib/tests/test_index_tricks.py | 53 | 4333 | from numpy.testing import *
import numpy as np
from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where,
ndenumerate, fill_diagonal, diag_indices,
diag_indices_from, s_, index_exp )
class TestUnravelIndex(TestCase):
def test_basic(self):
assert unravel_index(2,(2,2)) == (1,0)
assert unravel_index(254,(17,94)) == (2, 66)
assert_raises(ValueError, unravel_index, 4,(2,2))
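class DemoUnravelIndexSketch(TestCase):
    # Illustrative sketch, not part of the original suite: unravel_index
    # inverts the row-major flattening used by ndarray.flat.
    def test_roundtrip_sketch(self):
        a = np.arange(6).reshape(2, 3)
        idx = unravel_index(4, (2, 3))      # -> (1, 1)
        assert a[idx] == a.flat[4]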
class TestGrid(TestCase):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
assert(a.shape == (10,))
assert(b.shape == (20,))
assert(a[0] == -1)
assert_almost_equal(a[-1],1)
assert(b[0] == -1)
assert_almost_equal(b[1]-b[0],0.1,11)
assert_almost_equal(b[-1],b[0]+19*0.1,11)
assert_almost_equal(a[1]-a[0],2.0/9.0,11)
def test_linspace_equivalence(self):
y,st = np.linspace(2,10,retstep=1)
assert_almost_equal(st,8/49.0)
assert_array_almost_equal(y,mgrid[2:10:50j],13)
def test_nd(self):
c = mgrid[-1:1:10j,-2:2:10j]
d = mgrid[-1:1:0.1,-2:2:0.2]
assert(c.shape == (2,10,10))
assert(d.shape == (2,20,20))
assert_array_equal(c[0][0,:],-ones(10,'d'))
assert_array_equal(c[1][:,0],-2*ones(10,'d'))
assert_array_almost_equal(c[0][-1,:],ones(10,'d'),11)
assert_array_almost_equal(c[1][:,-1],2*ones(10,'d'),11)
assert_array_almost_equal(d[0,1,:]-d[0,0,:], 0.1*ones(20,'d'),11)
assert_array_almost_equal(d[1,:,1]-d[1,:,0], 0.2*ones(20,'d'),11)
class TestConcatenator(TestCase):
def test_1d(self):
assert_array_equal(r_[1,2,3,4,5,6],array([1,2,3,4,5,6]))
b = ones(5)
c = r_[b,0,0,b]
assert_array_equal(c,[1,1,1,1,1,0,0,1,1,1,1,1])
def test_mixed_type(self):
g = r_[10.1, 1:10]
assert(g.dtype == 'f8')
def test_more_mixed_type(self):
g = r_[-10.1, array([1]), array([2,3,4]), 10.0]
assert(g.dtype == 'f8')
def test_2d(self):
b = rand(5,5)
c = rand(5,5)
d = r_['1',b,c] # append columns
assert(d.shape == (5,10))
assert_array_equal(d[:,:5],b)
assert_array_equal(d[:,5:],c)
d = r_[b,c]
assert(d.shape == (10,5))
assert_array_equal(d[:5,:],b)
assert_array_equal(d[5:,:],c)
class TestNdenumerate(TestCase):
def test_basic(self):
a = array([[1,2], [3,4]])
assert_equal(list(ndenumerate(a)),
[((0,0), 1), ((0,1), 2), ((1,0), 3), ((1,1), 4)])
class TestIndexExpression(TestCase):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
assert_equal(a[:-1], a[s_[:-1]])
assert_equal(a[:-1], a[index_exp[:-1]])
def test_simple_1(self):
a = np.random.rand(4,5,6)
assert_equal(a[:,:3,[1,2]], a[index_exp[:,:3,[1,2]]])
assert_equal(a[:,:3,[1,2]], a[s_[:,:3,[1,2]]])
def test_fill_diagonal():
a = zeros((3, 3),int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]]))
# The same function can operate on a 4-d array:
a = zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
i = array([0, 1, 2])
yield (assert_equal, where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
a[di] = 100
yield (assert_array_equal, a,
array([[100, 2, 3, 4],
[ 5, 100, 7, 8],
[ 9, 10, 100, 12],
[ 13, 14, 15, 100]]))
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
a = zeros((2, 2, 2),int)
a[d3] = 1
yield (assert_array_equal, a,
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]]) )
def test_diag_indices_from():
x = np.random.random((4, 4))
r, c = diag_indices_from(x)
assert_array_equal(r, np.arange(4))
assert_array_equal(c, np.arange(4))
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -5,947,847,048,425,951,000 | 29.514085 | 73 | 0.487422 | false |
awkspace/ansible | lib/ansible/modules/source_control/gitlab_project.py | 10 | 14539 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez ([email protected])
# Copyright: (c) 2015, Werner Dijkerman ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
- When the project does not exist in Gitlab, it will be created.
  - When the project exists and state=absent, the project will be deleted.
- When changes are made to the project, the project will be updated.
version_added: "2.1"
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- auth_basic
options:
server_url:
description:
      - The URL of the Gitlab server, with protocol (e.g. http or https).
required: true
type: str
login_user:
description:
- Gitlab user name.
type: str
login_password:
description:
- Gitlab password for login_user
type: str
api_token:
description:
- Gitlab token for logging in.
type: str
aliases:
- login_token
group:
description:
      - ID or the full path of the group to which this project belongs.
type: str
name:
description:
- The name of the project
required: true
type: str
path:
description:
      - The path of the project you want to create; this will be server_url/<group>/path.
- If not supplied, name will be used.
type: str
description:
description:
      - A description for the project.
type: str
issues_enabled:
description:
- Whether you want to create issues or not.
- Possible values are true and false.
type: bool
default: yes
merge_requests_enabled:
description:
- If merge requests can be made or not.
- Possible values are true and false.
type: bool
default: yes
wiki_enabled:
description:
      - If a wiki for this project should be available or not.
- Possible values are true and false.
type: bool
default: yes
snippets_enabled:
description:
- If creating snippets should be available or not.
- Possible values are true and false.
type: bool
default: yes
visibility:
description:
- Private. Project access must be granted explicitly for each user.
- Internal. The project can be cloned by any logged in user.
- Public. The project can be cloned without any authentication.
default: private
type: str
choices: ["private", "internal", "public"]
aliases:
- visibility_level
import_url:
description:
- Git repository which will be imported into gitlab.
- Gitlab server needs read access to this git repository.
required: false
type: str
state:
description:
- create or delete project.
- Possible values are present and absent.
default: present
type: str
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab Project
gitlab_project:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
validate_certs: False
name: my_first_project
state: absent
delegate_to: localhost
- name: Create Gitlab Project in group Ansible
gitlab_project:
api_url: https://gitlab.example.com/
validate_certs: True
api_username: dj-wasabi
api_password: "MySecretPassword"
name: my_first_project
group: ansible
issues_enabled: False
wiki_enabled: True
snippets_enabled: True
import_url: http://git.example.com/example/lab.git
state: present
delegate_to: localhost
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the Gitlab API
returned: failed
type: str
sample: "400: path is already in use"
project:
description: API object
returned: always
type: dict
'''
import os
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.gitlab import findGroup, findProject
class GitLabProject(object):
def __init__(self, module, gitlab_instance):
self._module = module
self._gitlab = gitlab_instance
self.projectObject = None
'''
@param project_name Name of the project
@param namespace Namespace Object (User or Group)
@param options Options of the project
'''
def createOrUpdateProject(self, project_name, namespace, options):
changed = False
        # existsProject() has already been called in main(), so
        # projectObject is None only when the project does not exist yet.
if self.projectObject is None:
project = self.createProject(namespace, {
'name': project_name,
'path': options['path'],
'description': options['description'],
'issues_enabled': options['issues_enabled'],
'merge_requests_enabled': options['merge_requests_enabled'],
'wiki_enabled': options['wiki_enabled'],
'snippets_enabled': options['snippets_enabled'],
'visibility': options['visibility'],
'import_url': options['import_url']})
changed = True
else:
changed, project = self.updateProject(self.projectObject, {
'name': project_name,
'description': options['description'],
'issues_enabled': options['issues_enabled'],
'merge_requests_enabled': options['merge_requests_enabled'],
'wiki_enabled': options['wiki_enabled'],
'snippets_enabled': options['snippets_enabled'],
'visibility': options['visibility']})
self.projectObject = project
if changed:
if self._module.check_mode:
self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
try:
project.save()
except Exception as e:
self._module.fail_json(msg="Failed update project: %s " % e)
return True
else:
return False
'''
@param namespace Namespace Object (User or Group)
    @param arguments Attributes of the project
'''
def createProject(self, namespace, arguments):
if self._module.check_mode:
return True
arguments['namespace_id'] = namespace.id
try:
project = self._gitlab.projects.create(arguments)
except (gitlab.exceptions.GitlabCreateError) as e:
self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
return project
'''
@param project Project Object
    @param arguments Attributes of the project
'''
def updateProject(self, project, arguments):
changed = False
for arg_key, arg_value in arguments.items():
if arguments[arg_key] is not None:
if getattr(project, arg_key) != arguments[arg_key]:
setattr(project, arg_key, arguments[arg_key])
changed = True
return (changed, project)
def deleteProject(self):
if self._module.check_mode:
return True
project = self.projectObject
return project.delete()
'''
@param namespace User/Group object
@param name Name of the project
'''
def existsProject(self, namespace, path):
# When project exists, object will be stored in self.projectObject.
project = findProject(self._gitlab, namespace.full_path + '/' + path)
if project:
self.projectObject = project
return True
return False
def deprecation_warning(module):
deprecated_aliases = ['login_token']
module.deprecate("Aliases \'{aliases}\' are deprecated".format(aliases='\', \''.join(deprecated_aliases)), 2.10)
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
server_url=dict(type='str', required=True, removed_in_version=2.10),
login_user=dict(type='str', no_log=True, removed_in_version=2.10),
login_password=dict(type='str', no_log=True, removed_in_version=2.10),
api_token=dict(type='str', no_log=True, aliases=["login_token"]),
group=dict(type='str'),
name=dict(type='str', required=True),
path=dict(type='str'),
description=dict(type='str'),
issues_enabled=dict(type='bool', default=True),
merge_requests_enabled=dict(type='bool', default=True),
wiki_enabled=dict(type='bool', default=True),
snippets_enabled=dict(default=True, type='bool'),
visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
import_url=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['api_url', 'server_url'],
['api_username', 'login_user'],
['api_password', 'login_password'],
['api_username', 'api_token'],
['api_password', 'api_token'],
['login_user', 'login_token'],
['login_password', 'login_token']
],
required_together=[
['api_username', 'api_password'],
['login_user', 'login_password'],
],
required_one_of=[
['api_username', 'api_token', 'login_user', 'login_token']
],
supports_check_mode=True,
)
deprecation_warning(module)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
api_url = module.params['api_url']
validate_certs = module.params['validate_certs']
api_user = module.params['api_username']
api_password = module.params['api_password']
gitlab_url = server_url if api_url is None else api_url
gitlab_user = login_user if api_user is None else api_user
gitlab_password = login_password if api_password is None else api_password
gitlab_token = module.params['api_token']
group_identifier = module.params['group']
project_name = module.params['name']
project_path = module.params['path']
project_description = module.params['description']
issues_enabled = module.params['issues_enabled']
merge_requests_enabled = module.params['merge_requests_enabled']
wiki_enabled = module.params['wiki_enabled']
snippets_enabled = module.params['snippets_enabled']
visibility = module.params['visibility']
import_url = module.params['import_url']
state = module.params['state']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
try:
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
private_token=gitlab_token, api_version=4)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s" % to_native(e))
except (gitlab.exceptions.GitlabHttpError) as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s. \
Gitlab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
# Set project_path to project_name if it is empty.
if project_path is None:
project_path = project_name.replace(" ", "_")
gitlab_project = GitLabProject(module, gitlab_instance)
if group_identifier:
group = findGroup(gitlab_instance, group_identifier)
if group is None:
module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier)
namespace = gitlab_instance.namespaces.get(group.id)
project_exists = gitlab_project.existsProject(namespace, project_path)
else:
user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
namespace = gitlab_instance.namespaces.get(user.id)
project_exists = gitlab_project.existsProject(namespace, project_path)
if state == 'absent':
if project_exists:
gitlab_project.deleteProject()
module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
else:
module.exit_json(changed=False, msg="Project deleted or does not exists")
if state == 'present':
if gitlab_project.createOrUpdateProject(project_name, namespace, {
"path": project_path,
"description": project_description,
"issues_enabled": issues_enabled,
"merge_requests_enabled": merge_requests_enabled,
"wiki_enabled": wiki_enabled,
"snippets_enabled": snippets_enabled,
"visibility": visibility,
"import_url": import_url}):
module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
else:
module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,707,419,299,261,808,000 | 33.699284 | 156 | 0.624596 | false |
tomaslaz/KLMC_Analysis | thirdparty/JPype-0.5.4.2/src/python/jpype/_refdaemon.py | 8 | 1084 | #*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
import thread
import _jpype
def startJava():
_jpype.startReferenceQueue(1)
def startPython():
def _run() :
_jpype.attachThreadToJVM()
_jpype.startReferenceQueue(0)
thread.start_new_thread(_run, tuple())
def stop():
_jpype.stopReferenceQueue() | gpl-3.0 | 1,640,471,054,611,128,000 | 31.9375 | 78 | 0.580258 | false |
maxweisspoker/simplebitcoinfuncs | simplebitcoinfuncs/miscbitcoinfuncs.py | 1 | 7265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Misc functions related to Bitcoin, but which didn't feel right being
in the main bitcoin funcs
See _doctester.py for examples of most functions below.
'''
import os
import datetime
from binascii import hexlify, unhexlify
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
try:
from .hexhashes import hash256
from .ecmath import N
from .base58 import b58e
from .miscfuncs import *
except Exception as e:
if type(e) != ImportError and \
type(e) != ModuleNotFoundError and \
type(e) != ValueError and \
type(e) != SystemError:
raise Exception("Unknown problem with imports.")
from hexhashes import hash256
from ecmath import N
from base58 import b58e
from miscfuncs import *
def genkeyhex():
'''
Generate new random Bitcoin private key, using os.urandom and
double-sha256. Hex format.
'''
while True:
key = hash256(
hexlify(os.urandom(40) + str(datetime.datetime.now())
.encode("utf-8")))
# 40 bytes used instead of 32, as a buffer for any slight
# lack of entropy in urandom
# Double-sha256 used instead of single hash, for entropy
# reasons as well.
# I know, it's nit-picking, but better safe than sorry.
if int(key,16) > 1 and int(key,16) < N:
break
return key
def genkey(outcompressed=True,prefix='80'):
'''
Generate new random Bitcoin private key, using os.urandom and
double-sha256.
'''
key = prefix + genkeyhex()
if outcompressed:
key = key + '01'
return b58e(key)
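# Illustrative usage; outputs are random, so the shapes below are
# hypothetical rather than actual values:
#   genkey()             # compressed mainnet WIF ('80' prefix + '01' suffix)
#   genkey(False, 'ef')  # uncompressed testnet WIF ('ef' prefix)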
def oppushdatalen(num):
assert isitint(num)
assert num < 4294967296
assert num > 0
if num < 76:
return dechex(num,1)
elif num < 256:
return "4c" + dechex(num,1)
elif num < 65536:
return "4d" + hexreverse(dechex(num,2))
elif num < 4294967296:
return "4e" + hexreverse(dechex(num,4))
def intfromoppushdatalen(oppushdatalenhex):
oppushdatalenhex = strlify(oppushdatalenhex)
if oppushdatalenhex[:2] == "4c":
assert len(oppushdatalenhex) == 4
return int(oppushdatalenhex[2:4],16)
elif oppushdatalenhex[:2] == "4d":
assert len(oppushdatalenhex) == 6
return int(oppushdatalenhex[4:6] +
oppushdatalenhex[2:4],16)
elif oppushdatalenhex[:2] == "4e":
assert len(oppushdatalenhex) == 10
return int(oppushdatalenhex[8:10] +
oppushdatalenhex[6:8] +
oppushdatalenhex[4:6] +
oppushdatalenhex[2:4],16)
else:
assert len(oppushdatalenhex) == 2
return int(oppushdatalenhex,16)
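# Round-trip sketch, traced from the branches above (assumes dechex()
# zero-pads to the requested byte width):
#   oppushdatalen(75)            == '4b'    # small pushes encode directly
#   oppushdatalen(76)            == '4c4c'  # OP_PUSHDATA1 + one length byte
#   intfromoppushdatalen('4c4c') == 76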
def tovarint(num):
assert isitint(num) and num < 18446744073709551616
if num == 0:
return '00'
elif num < 253:
o = dechex(num,1)
elif num < 65536:
o = hexstrlify(b'\xfd' + unhexlify(dechex(num,2))[::-1])
elif num < 4294967296:
o = hexstrlify(b'\xfe' + unhexlify(dechex(num,4))[::-1])
elif num < 18446744073709551616:
o = hexstrlify(b'\xff' + unhexlify(dechex(num,8))[::-1])
return o
def numvarintbytes(varint):
varint = strlify(varint)
assert len(varint) == 2
if varint == 'ff':
return 9
elif varint == 'fe':
return 5
elif varint == 'fd':
return 3
else:
return 1
def fromvarint(varint):
varint = strlify(varint)
if varint[:2] == 'ff':
assert len(varint) == 18
elif varint[:2] == 'fe':
assert len(varint) == 10
elif varint[:2] == 'fd':
assert len(varint) == 6
else:
assert len(varint) == 2
return int(varint,16)
return int(hexreverse(varint[2:]),16)
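# Round-trip sketch (the payload after the prefix byte is little-endian):
#   tovarint(1)          == '01'
#   tovarint(253)        == 'fdfd00'
#   fromvarint('fdfd00') == 253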
def getandstrip_varintdata(data):
'''
Takes a hex string that begins with varint data, and has extra at
the end, and gets the varint integer, strips the varint bytes, and
returns the integer and the remaining data. So rather than having
to manually read the varint prefix, count, and strip, you can do
it in one function. This function will return a tuple of the data
and the leftover.
For example, let's say you are parsing a transaction from
beginning to end, and you know the next byte is a varint byte.
Here's an example:
fd5d010048304502200187af928e9d155c4b1ac9c1c9118153239aba76774f77
5d7c1f9c3e106ff33c0221008822b0f658edec22274d0b6ae9de10ebf2da06b1
bbdaaba4e50eb078f39e3d78014730440220795f0f4f5941a77ae032ecb9e337
53788d7eb5cb0c78d805575d6b00a1d9bfed02203e1f4ad9332d1416ae01e270
38e945bc9db59c732728a383a6f1ed2fb99da7a4014cc952410491bba2510912
a5bd37da1fb5b1673010e43d2c6d812c514e91bfa9f2eb129e1c183329db55bd
868e209aac2fbc02cb33d98fe74bf23f0c235d6126b1d8334f864104865c4029
3a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac09ef122b1
a986818a7cb624532f062c1d1f8722084861c5c3291ccffef4ec687441048d24
55d2403e08708fc1f556002f1b6cd83f992d085097f9974ab08a28838f07896f
bab08f39495e15fa6fad6edbfb1e754e35fa1c7844c41f322a1863d4621353ae
ffffffff0140420f00000000001976a914ae56b4db13554d321c402db3961187
aed1bbed5b88ac00000000
If the above tx fragment is input as a single long string with no
white-space, this function will return the tuple:
('004830...53ae', 'ffffffff...00000000')
See _doctester.py for that example in action.
'''
data = strlify(data)
numbytes = numvarintbytes(data[:2])
varint = data[:2*numbytes]
data = data[2*numbytes:]
tostrip = fromvarint(varint) * 2
return data[:tostrip], data[tostrip:]
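# e.g. getandstrip_varintdata('03aabbccdd') == ('aabbcc', 'dd'):
# the leading '03' varint marks a 3-byte payload; the rest is leftover.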
def inttoDER(a):
'''
Format an int/long to DER hex format
'''
o = dechex(a,1)
if int(o[:2],16) > 127:
o = '00' + o
olen = dechex(len(o)//2,1)
return '02' + olen + o
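# Examples traced from the logic above:
#   inttoDER(1)   == '020101'    # type 0x02, length 0x01, value 0x01
#   inttoDER(128) == '02020080'  # '00' pad added because the high bit is set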
def inttoLEB128(intinput):
'''
Convert int/long to unsigned LEB128 format hex
'''
    binstr = str(bin(intinput)) \
        .lstrip("0b").replace("b","").replace("L","") \
        .replace("'","").replace('"',"")
    if binstr == "":
        binstr = "0"  # bin(0) strips to nothing; LEB128 of 0 must be '00'
    if len(binstr) % 7:
        binstr = binstr.zfill(len(binstr) + 7 - (len(binstr) % 7))
bytelist = ""
for i in range(len(binstr) // 7):
if i < ((len(binstr) // 7) - 1):
pad = "1"
else:
pad = "0"
currbyte = binstr[(len(binstr) - (7*i + 7)):(len(binstr) - (7*i))]
currbyte = pad + currbyte
currbyte = dechex(int(currbyte,2))
# assert len(currbyte) == 2
bytelist = bytelist + currbyte
return bytelist
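# Classic LEB128 check: inttoLEB128(624485) == 'e58e26'
# (0xe5 0x8e 0x26 -- continuation bit set on every byte but the last)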
def LEB128toint(LEBinput):
'''
Convert unsigned LEB128 hex to integer
'''
reversedbytes = hexreverse(LEBinput)
binstr = ""
for i in range(len(LEBinput) // 2):
if i == 0:
assert int(reversedbytes[2*i:(2*i + 2)],16) < 128
else:
assert int(reversedbytes[2*i:(2*i + 2)],16) >= 128
tempbin = str(bin(int(reversedbytes[2*i:(2*i + 2)],16))) \
.lstrip("0b").replace("b","").replace("L","") \
.replace("'","").replace('"',"") \
.zfill(8)
binstr += tempbin[1:]
return int(binstr,2)
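# ...and LEB128toint('e58e26') == 624485, inverting the example above.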
| mit | -1,177,901,497,831,714,800 | 28.766393 | 74 | 0.622608 | false |
andersk/zulip | zerver/migrations/0217_migrate_create_stream_policy.py | 6 | 1270 | # Generated by Django 1.11.20 on 2019-05-06 13:15
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def upgrade_create_stream_policy(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Realm.CREATE_STREAM_POLICY_MEMBERS = 1
Realm.CREATE_STREAM_POLICY_ADMINS = 2
Realm.CREATE_STREAM_POLICY_WAITING_PERIOD = 3
Realm.objects.filter(waiting_period_threshold__exact=0).filter(
create_stream_by_admins_only=False
).update(create_stream_policy=Realm.CREATE_STREAM_POLICY_MEMBERS)
Realm.objects.filter(create_stream_by_admins_only=True).update(
create_stream_policy=Realm.CREATE_STREAM_POLICY_ADMINS
)
Realm.objects.filter(waiting_period_threshold__gt=0).filter(
create_stream_by_admins_only=False
).update(create_stream_policy=Realm.CREATE_STREAM_POLICY_WAITING_PERIOD)
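# Mapping recap (legacy fields -> create_stream_policy):
#   create_stream_by_admins_only=True                 -> ADMINS (2)
#   admins_only=False, waiting_period_threshold == 0  -> MEMBERS (1)
#   admins_only=False, waiting_period_threshold > 0   -> WAITING_PERIOD (3)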
class Migration(migrations.Migration):
dependencies = [
("zerver", "0216_add_create_stream_policy"),
]
operations = [
migrations.RunPython(
upgrade_create_stream_policy, reverse_code=migrations.RunPython.noop, elidable=True
),
]
| apache-2.0 | 4,167,860,464,716,065,300 | 36.352941 | 95 | 0.723622 | false |
vauxoo-dev/vxtools-server | setup.py | 1 | 1465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='vxtools-server',
version='0.1.0',
description='Server tools, part of VauxooTools',
long_description=readme + '\n\n' + history,
author='Tulio Ruiz',
author_email='[email protected]',
url='https://github.com/ruiztulio/vxtools-server',
packages=[
'vxtools-server',
],
    package_dir={'vxtools-server': 'vxtools-server'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='vxtools-server',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
) | bsd-3-clause | -7,057,690,043,334,302,000 | 26.148148 | 66 | 0.612969 | false |
webnotes/wnframework | core/doctype/file_data/file_data.py | 32 | 1734 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
record of files
naming for same name files: file.gif, file-1.gif, file-2.gif etc
"""
import webnotes, webnotes.utils, os
from webnotes import conf
class DocType():
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def before_insert(self):
webnotes.local.rollback_observers.append(self)
def on_update(self):
# check duplicate assignement
n_records = webnotes.conn.sql("""select count(*) from `tabFile Data`
where file_name=%s
and attached_to_doctype=%s
and attached_to_name=%s""", (self.doc.file_name, self.doc.attached_to_doctype,
self.doc.attached_to_name))[0][0]
if n_records > 1:
webnotes.msgprint(webnotes._("Same file has already been attached to the record"))
raise webnotes.DuplicateEntryError
def on_trash(self):
if self.doc.attached_to_name:
# check persmission
try:
if not webnotes.has_permission(self.doc.attached_to_doctype,
"write", self.doc.attached_to_name):
webnotes.msgprint(webnotes._("No permission to write / remove."),
raise_exception=True)
except webnotes.DoesNotExistError:
pass
# if file not attached to any other record, delete it
if self.doc.file_name and not webnotes.conn.count("File Data",
{"file_name": self.doc.file_name, "name": ["!=", self.doc.name]}):
if self.doc.file_name.startswith("files/"):
path = webnotes.utils.get_site_path("public", self.doc.file_name)
else:
path = webnotes.utils.get_site_path(conf.files_path, self.doc.file_name)
if os.path.exists(path):
os.remove(path)
def on_rollback(self):
self.on_trash() | mit | 8,421,549,943,214,468,000 | 31.12963 | 85 | 0.691465 | false |
FrankBian/kuma | vendor/packages/pyparsing/examples/simpleSQL.py | 16 | 4698 | # simpleSQL.py
#
# simple demo of using the parsing library to do simple-minded SQL parsing
# could be extended to include where clauses etc.
#
# Copyright (c) 2003, Paul McGuire
#
from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Optional, \
Combine, Group, alphas, nums, alphanums, ParseException, Forward, oneOf, quotedString, \
ZeroOrMore, restOfLine, Keyword
def test( str ):
print str,"->"
try:
tokens = simpleSQL.parseString( str )
print "tokens = ", tokens
print "tokens.columns =", tokens.columns
print "tokens.tables =", tokens.tables
print "tokens.where =", tokens.where
except ParseException, err:
print " "*err.loc + "^\n" + err.msg
print err
print
# define SQL tokens
selectStmt = Forward()
selectToken = Keyword("select", caseless=True)
fromToken = Keyword("from", caseless=True)
ident = Word( alphas, alphanums + "_$" ).setName("identifier")
columnName = Upcase( delimitedList( ident, ".", combine=True ) )
columnNameList = Group( delimitedList( columnName ) )
tableName = Upcase( delimitedList( ident, ".", combine=True ) )
tableNameList = Group( delimitedList( tableName ) )
whereExpression = Forward()
and_ = Keyword("and", caseless=True)
or_ = Keyword("or", caseless=True)
in_ = Keyword("in", caseless=True)
E = CaselessLiteral("E")
binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True)
arithSign = Word("+-",exact=1)
realNum = Combine( Optional(arithSign) + ( Word( nums ) + "." + Optional( Word(nums) ) |
( "." + Word(nums) ) ) +
Optional( E + Optional(arithSign) + Word(nums) ) )
intNum = Combine( Optional(arithSign) + Word( nums ) +
Optional( E + Optional("+") + Word(nums) ) )
columnRval = realNum | intNum | quotedString | columnName # need to add support for alg expressions
whereCondition = Group(
( columnName + binop + columnRval ) |
( columnName + in_ + "(" + delimitedList( columnRval ) + ")" ) |
( columnName + in_ + "(" + selectStmt + ")" ) |
( "(" + whereExpression + ")" )
)
whereExpression << whereCondition + ZeroOrMore( ( and_ | or_ ) + whereExpression )
# define the grammar
selectStmt << ( selectToken +
( '*' | columnNameList ).setResultsName( "columns" ) +
fromToken +
tableNameList.setResultsName( "tables" ) +
Optional( Group( CaselessLiteral("where") + whereExpression ), "" ).setResultsName("where") )
simpleSQL = selectStmt
# define Oracle comment format, and ignore them
oracleSqlComment = "--" + restOfLine
simpleSQL.ignore( oracleSqlComment )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select A,B,C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select &&& frox Sys.dual" )
test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE')" )
test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)" )
test( "Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators" )
"""
Test output:
>pythonw -u simpleSQL.py
SELECT * from XYZZY, ABC ->
tokens = ['select', '*', 'from', ['XYZZY', 'ABC']]
tokens.columns = *
tokens.tables = ['XYZZY', 'ABC']
select * from SYS.XYZZY ->
tokens = ['select', '*', 'from', ['SYS.XYZZY']]
tokens.columns = *
tokens.tables = ['SYS.XYZZY']
Select A from Sys.dual ->
tokens = ['select', ['A'], 'from', ['SYS.DUAL']]
tokens.columns = ['A']
tokens.tables = ['SYS.DUAL']
Select A,B,C from Sys.dual ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL']
Select A, B, C from Sys.dual ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL']
Select A, B, C from Sys.dual, Table2 ->
tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL', 'TABLE2']]
tokens.columns = ['A', 'B', 'C']
tokens.tables = ['SYS.DUAL', 'TABLE2']
Xelect A, B, C from Sys.dual ->
^
Expected 'select'
Expected 'select' (0), (1,1)
Select A, B, C frox Sys.dual ->
^
Expected 'from'
Expected 'from' (15), (1,16)
Select ->
^
Expected '*'
Expected '*' (6), (1,7)
Select &&& frox Sys.dual ->
^
Expected '*'
Expected '*' (7), (1,8)
>Exit code: 0
""" | mpl-2.0 | -14,859,202,620,870,594 | 31.457746 | 112 | 0.590677 | false |
bootandy/sqlalchemy | test/engine/test_transaction.py | 23 | 54427 | from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, ne_, expect_warnings
import sys
from sqlalchemy import event
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy import create_engine, MetaData, INT, VARCHAR, Sequence, \
select, Integer, String, func, text, exc
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy import testing
from sqlalchemy.testing import fixtures
users, metadata = None, None
class TransactionTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, metadata
metadata = MetaData()
users = Table('query_users', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True,
)
users.create(testing.db)
def teardown(self):
testing.db.execute(users.delete()).close()
@classmethod
def teardown_class(cls):
users.drop(testing.db)
def test_commits(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 3
transaction.commit()
connection.close()
def test_rollback(self):
"""test a basic rollback"""
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_raise(self):
connection = testing.db.connect()
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=1, user_name='user3')
transaction.commit()
assert False
except Exception as e:
print("Exception: ", e)
transaction.rollback()
result = connection.execute("select * from query_users")
assert len(result.fetchall()) == 0
connection.close()
def test_transaction_container(self):
def go(conn, table, data):
for d in data:
conn.execute(table.insert(), d)
testing.db.transaction(go, users, [dict(user_id=1,
user_name='user1')])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
assert_raises(exc.DBAPIError, testing.db.transaction, go,
users, [{'user_id': 2, 'user_name': 'user2'},
{'user_id': 1, 'user_name': 'user3'}])
eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1'
)])
def test_nested_rollback(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1,
user_name='user1')
connection.execute(users.insert(), user_id=2,
user_name='user2')
connection.execute(users.insert(), user_id=3,
user_name='user3')
trans2 = connection.begin()
try:
connection.execute(users.insert(), user_id=4,
user_name='user4')
connection.execute(users.insert(), user_id=5,
user_name='user5')
raise Exception('uh oh')
trans2.commit()
except:
trans2.rollback()
raise
transaction.rollback()
except Exception as e:
transaction.rollback()
raise
except Exception as e:
try:
assert str(e) == 'uh oh' # and not "This transaction is
# inactive"
finally:
connection.close()
def test_branch_nested_rollback(self):
connection = testing.db.connect()
try:
connection.begin()
branched = connection.connect()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert not connection.in_transaction()
eq_(connection.scalar("select count(*) from query_users"), 0)
finally:
connection.close()
def test_branch_autorollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
try:
branched.execute(users.insert(), user_id=1, user_name='user1')
except exc.DBAPIError:
pass
finally:
connection.close()
def test_branch_orig_rollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
def test_branch_autocommit(self):
connection = testing.db.connect()
try:
branched = connection.connect()
branched.execute(users.insert(), user_id=1, user_name='user1')
finally:
connection.close()
eq_(testing.db.scalar("select count(*) from query_users"), 1)
@testing.requires.savepoints
def test_branch_savepoint_rollback(self):
connection = testing.db.connect()
try:
trans = connection.begin()
branched = connection.connect()
assert branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin_nested()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert connection.in_transaction()
trans.commit()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
@testing.requires.two_phase_transactions
def test_branch_twophase_rollback(self):
connection = testing.db.connect()
try:
branched = connection.connect()
assert not branched.in_transaction()
branched.execute(users.insert(), user_id=1, user_name='user1')
nested = branched.begin_twophase()
branched.execute(users.insert(), user_id=2, user_name='user2')
nested.rollback()
assert not connection.in_transaction()
eq_(connection.scalar("select count(*) from query_users"), 1)
finally:
connection.close()
def test_retains_through_options(self):
connection = testing.db.connect()
try:
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
conn2 = connection.execution_options(dummy=True)
conn2.execute(users.insert(), user_id=2, user_name='user2')
transaction.rollback()
eq_(connection.scalar("select count(*) from query_users"), 0)
finally:
connection.close()
def test_nesting(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans2.commit()
transaction.rollback()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
def test_with_interface(self):
connection = testing.db.connect()
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
try:
connection.execute(users.insert(), user_id=2, user_name='user2.5')
except Exception as e:
trans.__exit__(*sys.exc_info())
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans.__exit__(None, None, None)
assert not trans.is_active
self.assert_(connection.scalar('select count(*) from '
'query_users') == 1)
connection.close()
def test_close(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.commit()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 5)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 5
connection.close()
def test_close2(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans2 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
assert connection.in_transaction()
trans2.close()
assert connection.in_transaction()
transaction.close()
assert not connection.in_transaction()
self.assert_(connection.scalar('select count(*) from '
'query_users') == 0)
result = connection.execute('select * from query_users')
assert len(result.fetchall()) == 0
connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
connection.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
connection.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
connection = testing.db.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name='user2')
trans3 = connection.begin()
connection.execute(users.insert(), user_id=3, user_name='user3')
trans3.rollback()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
connection.close()
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
transaction.commit()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction.commit()
transaction.close()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.rollback()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction.prepare()
transaction.rollback()
transaction.close()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
connection.close()
# PG emergency shutdown:
# select * from pg_prepared_xacts
# ROLLBACK PREPARED '<xid>'
@testing.crashes('mysql', 'Crashing on 5.5, not worth it')
@testing.requires.skip_mysql_on_windows
@testing.requires.two_phase_transactions
@testing.requires.savepoints
def test_mixed_two_phase_transaction(self):
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction2 = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
transaction3 = connection.begin_nested()
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction4 = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
transaction4.commit()
transaction3.rollback()
connection.execute(users.insert(), user_id=5, user_name='user5')
transaction2.commit()
transaction.prepare()
transaction.commit()
eq_(connection.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (5, )])
connection.close()
@testing.requires.two_phase_transactions
@testing.crashes('mysql+oursql',
'Times out in full test runs only, causing '
'subsequent tests to fail')
@testing.crashes('mysql+zxjdbc',
'Deadlocks, causing subsequent tests to fail')
@testing.fails_on('mysql', 'FIXME: unknown')
def test_two_phase_recover(self):
        # MySQL recovery doesn't currently seem to work correctly.
        # Prepared transactions disappear when connections are closed,
        # and even when they aren't, it doesn't seem possible to use the
        # recovery id.
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.prepare()
connection.invalidate()
connection2 = testing.db.connect()
eq_(
connection2.execution_options(autocommit=True).
execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(), [])
recoverables = connection2.recover_twophase()
assert transaction.xid in recoverables
connection2.commit_prepared(transaction.xid, recover=True)
eq_(connection2.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, )])
connection2.close()
@testing.requires.two_phase_transactions
def test_multiple_two_phase(self):
conn = testing.db.connect()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name='user1')
xa.prepare()
xa.commit()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=2, user_name='user2')
xa.prepare()
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=3, user_name='user3')
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=4, user_name='user4')
xa.prepare()
xa.commit()
result = \
conn.execute(select([users.c.user_name]).
order_by(users.c.user_id))
eq_(result.fetchall(), [('user1', ), ('user4', )])
conn.close()
@testing.requires.two_phase_transactions
def test_reset_rollback_two_phase_no_rollback(self):
# test [ticket:2907], essentially that the
# TwoPhaseTransaction is given the job of "reset on return"
# so that picky backends like MySQL correctly clear out
# their state when a connection is closed without handling
# the transaction explicitly.
eng = testing_engine()
# MySQL raises if you call straight rollback() on
# a connection with an XID present
@event.listens_for(eng, "invalidate")
def conn_invalidated(dbapi_con, con_record, exception):
dbapi_con.close()
raise exception
with eng.connect() as conn:
rec = conn.connection._connection_record
raw_dbapi_con = rec.connection
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name='user1')
assert rec.connection is raw_dbapi_con
with eng.connect() as conn:
result = \
conn.execute(select([users.c.user_name]).
order_by(users.c.user_id))
eq_(result.fetchall(), [])
class ResetAgentTest(fixtures.TestBase):
__backend__ = True
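    # A connection's _reset_agent is the transaction object the pool
    # consults when resetting a connection on return (the "reset on
    # return" behavior noted in the two-phase test above); these tests
    # pin down when it is set and cleared.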
def test_begin_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
assert not trans.is_active
def test_begin_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.savepoints
def test_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert trans2.is_active # was never closed
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_rollback_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.savepoints
def test_begin_begin_nested_rollback_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_begin_rollback_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is None
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_begin_commit_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin()
assert connection.connection._reset_agent is trans
trans2.commit()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_commit(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
class AutoRollbackTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global metadata
metadata = MetaData()
@classmethod
def teardown_class(cls):
metadata.drop_all(testing.db)
def test_rollback_deadlock(self):
"""test that returning connections to the pool clears any object
locks."""
conn1 = testing.db.connect()
conn2 = testing.db.connect()
users = Table('deadlock_users', metadata, Column('user_id',
INT, primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
users.create(conn1)
conn1.execute('select * from deadlock_users')
conn1.close()
# without auto-rollback in the connection pool's return() logic,
# this deadlocks in PostgreSQL, because conn1 is returned to the
# pool but still has a lock on "deadlock_users". comment out the
# rollback in pool/ConnectionFairy._close() to see !
users.drop(conn2)
conn2.close()
class ExplicitAutoCommitTest(fixtures.TestBase):
"""test the 'autocommit' flag on select() and text() objects.
Requires PostgreSQL so that we may define a custom function which
modifies the database. """
__only_on__ = 'postgresql'
@classmethod
def setup_class(cls):
global metadata, foo
metadata = MetaData(testing.db)
foo = Table('foo', metadata, Column('id', Integer,
primary_key=True), Column('data', String(100)))
metadata.create_all()
testing.db.execute("create function insert_foo(varchar) "
"returns integer as 'insert into foo(data) "
"values ($1);select 1;' language sql")
def teardown(self):
foo.delete().execute().close()
@classmethod
def teardown_class(cls):
testing.db.execute('drop function insert_foo(varchar)')
metadata.drop_all()
def test_control(self):
# test that not using autocommit does not commit
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')]))
assert conn2.execute(select([foo.c.data])).fetchall() == []
conn1.execute(text("select insert_foo('moredata')"))
assert conn2.execute(select([foo.c.data])).fetchall() == []
trans = conn1.begin()
trans.commit()
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('moredata', )]
conn1.close()
conn2.close()
def test_explicit_compiled(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1'
)]).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.close()
conn2.close()
def test_explicit_connection(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data1'
)]))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# connection supersedes statement
conn1.execution_options(autocommit=False).\
execute(select([func.insert_foo('data2'
)]).execution_options(autocommit=True))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
)])
# ditto
conn1.execution_options(autocommit=True).\
execute(select([func.insert_foo('data3'
)]).execution_options(autocommit=False))
eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1',
), ('data2', ), ('data3', )])
conn1.close()
conn2.close()
def test_explicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')"
).execution_options(autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on select\(\) is deprecated',
r'``autocommit\(\)`` is deprecated')
def test_explicit_compiled_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(select([func.insert_foo('data1')],
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', )]
conn1.execute(select([func.insert_foo('data2')]).autocommit())
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('data1', ), ('data2', )]
conn1.close()
conn2.close()
@testing.uses_deprecated(r'autocommit on text\(\) is deprecated')
def test_explicit_text_deprecated(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("select insert_foo('moredata')",
autocommit=True))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('moredata', )]
conn1.close()
conn2.close()
def test_implicit_text(self):
conn1 = testing.db.connect()
conn2 = testing.db.connect()
conn1.execute(text("insert into foo (data) values "
"('implicitdata')"))
assert conn2.execute(select([foo.c.data])).fetchall() \
== [('implicitdata', )]
conn1.close()
conn2.close()
tlengine = None
class TLTransactionTest(fixtures.TestBase):
__requires__ = ('ad_hoc_engines', )
__backend__ = True
@classmethod
def setup_class(cls):
global users, metadata, tlengine
tlengine = testing_engine(options=dict(strategy='threadlocal'))
metadata = MetaData()
users = Table('query_users', metadata, Column('user_id', INT,
Sequence('query_users_id_seq', optional=True),
primary_key=True), Column('user_name',
VARCHAR(20)), test_needs_acid=True)
metadata.create_all(tlengine)
def teardown(self):
tlengine.execute(users.delete()).close()
@classmethod
def teardown_class(cls):
tlengine.close()
metadata.drop_all(tlengine)
tlengine.dispose()
def setup(self):
# ensure tests start with engine closed
tlengine.close()
@testing.crashes('oracle', 'TNS error of unknown origin occurs on the buildbot.')
def test_rollback_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.rollback()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.rollback()
def test_commit_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.commit()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.commit()
def test_prepare_no_trans(self):
tlengine = testing_engine(options=dict(strategy="threadlocal"))
# shouldn't fail
tlengine.prepare()
tlengine.begin()
tlengine.rollback()
# shouldn't fail
tlengine.prepare()
def test_connection_close(self):
"""test that when connections are closed for real, transactions
are rolled back and disposed."""
c = tlengine.contextual_connect()
c.begin()
assert c.in_transaction()
c.close()
assert not c.in_transaction()
def test_transaction_close(self):
c = tlengine.contextual_connect()
t = c.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
t2 = c.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.execute(users.insert(), user_id=4, user_name='user4')
t2.close()
result = c.execute('select * from query_users')
assert len(result.fetchall()) == 4
t.close()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
c.close()
external_connection.close()
def test_rollback(self):
"""test a basic rollback"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
external_connection.close()
def test_commit(self):
"""test a basic commit"""
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
external_connection.close()
def test_with_interface(self):
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
trans.commit()
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
trans.__exit__(Exception, "fake", None)
trans = tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
trans.__exit__(None, None, None)
eq_(
tlengine.execute(users.select().order_by(users.c.user_id)).fetchall(),
[
(1, 'user1'),
(2, 'user2'),
(4, 'user4'),
]
)
def test_commits(self):
connection = tlengine.connect()
assert connection.execute('select count(*) from query_users'
).scalar() == 0
connection.close()
connection = tlengine.contextual_connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name='user1')
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
transaction.commit()
transaction = connection.begin()
result = connection.execute('select * from query_users')
l = result.fetchall()
assert len(l) == 3, 'expected 3 got %d' % len(l)
transaction.commit()
connection.close()
def test_rollback_off_conn(self):
# test that a TLTransaction opened off a TLConnection allows
# that TLConnection to be aware of the transactional context
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
external_connection.close()
def test_morerollback_off_conn(self):
# test that an existing TLConnection automatically takes place
# in a TLTransaction opened on a second TLConnection
conn = tlengine.contextual_connect()
conn2 = tlengine.contextual_connect()
trans = conn2.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.rollback()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 0
finally:
conn.close()
conn2.close()
external_connection.close()
def test_commit_off_connection(self):
conn = tlengine.contextual_connect()
trans = conn.begin()
conn.execute(users.insert(), user_id=1, user_name='user1')
conn.execute(users.insert(), user_id=2, user_name='user2')
conn.execute(users.insert(), user_id=3, user_name='user3')
trans.commit()
external_connection = tlengine.connect()
result = external_connection.execute('select * from query_users'
)
try:
assert len(result.fetchall()) == 3
finally:
conn.close()
external_connection.close()
def test_nesting_rollback(self):
"""tests nesting of transactions, rollback at the end"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.rollback()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_nesting_commit(self):
"""tests nesting of transactions, commit at the end."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.commit()
tlengine.commit()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 5)
finally:
external_connection.close()
def test_mixed_nesting(self):
"""tests nesting of transactions off the TLEngine directly
inside of transactions off the connection from the TLEngine"""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
conn = tlengine.contextual_connect()
trans = conn.begin()
trans2 = conn.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.begin()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.begin()
tlengine.execute(users.insert(), user_id=5, user_name='user5')
tlengine.execute(users.insert(), user_id=6, user_name='user6')
tlengine.execute(users.insert(), user_id=7, user_name='user7')
tlengine.commit()
tlengine.execute(users.insert(), user_id=8, user_name='user8')
tlengine.commit()
trans2.commit()
trans.rollback()
conn.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
def test_more_mixed_nesting(self):
"""tests nesting of transactions off the connection from the
TLEngine inside of transactions off the TLEngine directly."""
external_connection = tlengine.connect()
self.assert_(external_connection.connection
is not tlengine.contextual_connect().connection)
tlengine.begin()
connection = tlengine.contextual_connect()
connection.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin()
connection.execute(users.insert(), user_id=2, user_name='user2')
connection.execute(users.insert(), user_id=3, user_name='user3')
trans = connection.begin()
connection.execute(users.insert(), user_id=4, user_name='user4')
connection.execute(users.insert(), user_id=5, user_name='user5')
trans.commit()
tlengine.commit()
tlengine.rollback()
connection.close()
try:
self.assert_(external_connection.scalar(
'select count(*) from query_users'
) == 0)
finally:
external_connection.close()
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.rollback()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (3, )])
tlengine.close()
@testing.requires.savepoints
@testing.crashes('oracle+zxjdbc',
'Errors out and causes subsequent tests to '
'deadlock')
def test_nested_subtransaction_commit(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, ), (3, )])
tlengine.close()
@testing.requires.savepoints
def test_rollback_to_subtransaction(self):
tlengine.begin()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.begin_nested()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.begin()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.rollback()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.commit()
tlengine.close()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (4, )])
tlengine.close()
def test_connections(self):
"""tests that contextual_connect is threadlocal"""
c1 = tlengine.contextual_connect()
c2 = tlengine.contextual_connect()
assert c1.connection is c2.connection
c2.close()
assert not c1.closed
assert not tlengine.closed
@testing.requires.independent_cursors
def test_result_closing(self):
"""tests that contextual_connect is threadlocal"""
r1 = tlengine.execute(select([1]))
r2 = tlengine.execute(select([1]))
row1 = r1.fetchone()
row2 = r2.fetchone()
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
# close again, nothing happens since resultproxy calls close()
# only once
r1.close()
assert r2.connection is r1.connection
assert not r2.connection.closed
assert not tlengine.closed
r2.close()
assert r2.connection.closed
assert tlengine.closed
@testing.crashes('oracle+cx_oracle', 'intermittent failures on the buildbot')
def test_dispose(self):
eng = testing_engine(options=dict(strategy='threadlocal'))
result = eng.execute(select([1]))
eng.dispose()
eng.execute(select([1]))
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self):
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=1, user_name='user1')
tlengine.prepare()
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=2, user_name='user2')
tlengine.commit()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=3, user_name='user3')
tlengine.rollback()
tlengine.begin_twophase()
tlengine.execute(users.insert(), user_id=4, user_name='user4')
tlengine.prepare()
tlengine.rollback()
eq_(tlengine.execute(select([users.c.user_id]).
order_by(users.c.user_id)).fetchall(),
[(1, ), (2, )])
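# Illustrative summary (added; not part of the original suite): under the
# "threadlocal" strategy exercised above, engine-level begin()/commit()/
# rollback() and every contextual_connect() issued in the same thread share
# one transactional context, e.g.:
#
#     tlengine.begin()
#     tlengine.execute(users.insert(), user_id=1, user_name='u1')
#     tlengine.rollback()   # undoes the insert for all threadlocal users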
class IsolationLevelTest(fixtures.TestBase):
__requires__ = ('isolation_level', 'ad_hoc_engines')
__backend__ = True
def _default_isolation_level(self):
if testing.against('sqlite'):
return 'SERIALIZABLE'
elif testing.against('postgresql'):
return 'READ COMMITTED'
elif testing.against('mysql'):
return "REPEATABLE READ"
else:
assert False, "default isolation level not known"
def _non_default_isolation_level(self):
if testing.against('sqlite'):
return 'READ UNCOMMITTED'
elif testing.against('postgresql'):
return 'SERIALIZABLE'
elif testing.against('mysql'):
return "SERIALIZABLE"
else:
assert False, "non default isolation level not known"
def test_engine_param_stays(self):
eng = testing_engine()
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
level = self._non_default_isolation_level()
ne_(isolation_level, level)
eng = testing_engine(options=dict(isolation_level=level))
eq_(
eng.dialect.get_isolation_level(
eng.connect().connection),
level
)
# check that it stays
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
level
)
conn.close()
def test_default_level(self):
eng = testing_engine(options=dict())
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection)
eq_(isolation_level, self._default_isolation_level())
def test_reset_level(self):
eng = testing_engine(options=dict())
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
eng.dialect.set_isolation_level(
conn.connection, self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level()
)
eng.dialect.reset_isolation_level(conn.connection)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level()
)
conn.close()
def test_reset_level_with_setting(self):
eng = testing_engine(
options=dict(
isolation_level=self._non_default_isolation_level()))
conn = eng.connect()
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
eng.dialect.set_isolation_level(
conn.connection,
self._default_isolation_level())
eq_(eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level())
eng.dialect.reset_isolation_level(conn.connection)
eq_(eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level())
conn.close()
def test_invalid_level(self):
eng = testing_engine(options=dict(isolation_level='FOO'))
assert_raises_message(
exc.ArgumentError,
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
("FOO",
eng.dialect.name, ", ".join(eng.dialect._isolation_lookup)),
eng.connect
)
def test_connection_invalidated(self):
eng = testing_engine()
conn = eng.connect()
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level())
c2.invalidate()
c2.connection
# TODO: do we want to rebuild the previous isolation?
# for now, this is current behavior so we will leave it.
eq_(c2.get_isolation_level(), self._default_isolation_level())
def test_per_connection(self):
from sqlalchemy.pool import QueuePool
eng = testing_engine(
options=dict(
poolclass=QueuePool,
pool_size=2, max_overflow=0))
c1 = eng.connect()
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c2.connection),
self._default_isolation_level()
)
c1.close()
c2.close()
c3 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c3.connection),
self._default_isolation_level()
)
c4 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c4.connection),
self._default_isolation_level()
)
c3.close()
c4.close()
def test_warning_in_transaction(self):
eng = testing_engine()
c1 = eng.connect()
with expect_warnings(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until next "
"transaction"
):
with c1.begin():
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
# stays outside of transaction
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level()
)
def test_per_statement_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
select([1]).execution_options,
isolation_level=self._non_default_isolation_level()
)
def test_per_engine(self):
# new in 0.9
eng = create_engine(
testing.db.url,
execution_options={
'isolation_level':
self._non_default_isolation_level()}
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level()
)
def test_isolation_level_accessors_connection_default(self):
eng = create_engine(
testing.db.url
)
with eng.connect() as conn:
eq_(conn.default_isolation_level, self._default_isolation_level())
with eng.connect() as conn:
eq_(conn.get_isolation_level(), self._default_isolation_level())
def test_isolation_level_accessors_connection_option_modified(self):
eng = create_engine(
testing.db.url
)
with eng.connect() as conn:
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level())
eq_(conn.default_isolation_level, self._default_isolation_level())
eq_(conn.get_isolation_level(),
self._non_default_isolation_level())
eq_(c2.get_isolation_level(), self._non_default_isolation_level())
| mit | -3,474,624,209,129,519,600 | 36.613683 | 85 | 0.588421 | false |
JeanKossaifi/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
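    # (Added note, not in the original:) the two asserts below use the
    # algebraically equivalent forms 1/(1+exp(-x)) and exp(x)/(1+exp(x)),
    # which avoid overflow for large positive and large negative x.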
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause | -6,774,058,060,806,427,000 | 32.254545 | 79 | 0.67906 | false |
maestrano/openerp | openerp/addons/l10n_be_hr_payroll/__openerp__.py | 118 | 1817 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Belgian Payroll Rules.
======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
* Salary Maj, ONSS, Withholding Tax, Child Allowance, ...
""",
'auto_install': False,
'demo': ['l10n_be_hr_payroll_demo.xml'],
'data':[
'l10n_be_hr_payroll_view.xml',
'l10n_be_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,182,060,247,907,120,600 | 33.942308 | 78 | 0.60044 | false |
openstack/manila | manila/tests/share/drivers/glusterfs/test_layout.py | 1 | 12982 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from unittest import mock
import ddt
from oslo_config import cfg
from oslo_utils import importutils
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_share
from manila.tests import fake_utils
CONF = cfg.CONF
fake_local_share_path = '/mnt/nfs/testvol/fakename'
fake_path_to_private_key = '/fakepath/to/privatekey'
fake_remote_server_password = 'fakepassword'
def fake_access(kwargs):
fake_access_rule = fake_share.fake_access(**kwargs)
fake_access_rule.to_dict = lambda: fake_access_rule.values
return fake_access_rule
class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase):
supported_layouts = ('layout_fake.FakeLayout',
'layout_something.SomeLayout')
supported_protocols = ('NFS,')
_supported_access_types = ('ip',)
_supported_access_levels = ('rw',)
@ddt.ddt
class GlusterfsShareDriverBaseTestCase(test.TestCase):
"""Tests GlusterfsShareDriverBase."""
def setUp(self):
super(GlusterfsShareDriverBaseTestCase, self).setUp()
CONF.set_default('driver_handles_share_servers', False)
fake_conf, __ = self._setup()
self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf)
self.fake_share = mock.Mock(name='fake_share')
self.fake_context = mock.Mock(name='fake_context')
self.fake_access = mock.Mock(name='fake_access')
def _setup(self):
fake_conf = config.Configuration(None)
fake_layout = mock.Mock()
self.mock_object(importutils, "import_object",
mock.Mock(return_value=fake_layout))
return fake_conf, fake_layout
def test_init(self):
self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False,
configuration=config.Configuration(None))
@ddt.data({'has_snap': None, 'layout_name': None},
{'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'},
{'has_snap': True, 'layout_name': 'layout_something.SomeLayout'})
@ddt.unpack
def test_init_subclass(self, has_snap, layout_name):
conf, _layout = self._setup()
if layout_name is not None:
conf.glusterfs_share_layout = layout_name
if has_snap is None:
del(_layout._snapshots_are_supported)
else:
_layout._snapshots_are_supported = has_snap
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
snap_result = {None: False}.get(has_snap, has_snap)
layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name,
layout_name)
importutils.import_object.assert_called_once_with(
'manila.share.drivers.glusterfs.%s' % layout_result,
_driver, configuration=conf)
self.assertEqual(_layout, _driver.layout)
self.assertEqual(snap_result, _driver.snapshots_are_supported)
def test_init_nosupp_layout(self):
conf = config.Configuration(None)
conf.glusterfs_share_layout = 'nonsense_layout'
self.assertRaises(exception.GlusterfsException,
GlusterfsFakeShareDriver, False, configuration=conf)
def test_setup_via_manager(self):
self.assertIsNone(self._driver._setup_via_manager(mock.Mock()))
def test_supported_access_types(self):
self.assertEqual(('ip',), self._driver.supported_access_types)
def test_supported_access_levels(self):
self.assertEqual(('rw',), self._driver.supported_access_levels)
def test_access_rule_validator(self):
rule = mock.Mock()
abort = mock.Mock()
valid = mock.Mock()
self.mock_object(layout.ganesha_utils, 'validate_access_rule',
mock.Mock(return_value=valid))
ret = self._driver._access_rule_validator(abort)(rule)
self.assertEqual(valid, ret)
layout.ganesha_utils.validate_access_rule.assert_called_once_with(
('ip',), ('rw',), rule, abort)
@ddt.data({'inset': ([], ['ADD'], []), 'outset': (['ADD'], []),
'recovery': False},
{'inset': ([], [], ['DELETE']), 'outset': ([], ['DELETE']),
'recovery': False},
{'inset': (['EXISTING'], ['ADD'], ['DELETE']),
'outset': (['ADD'], ['DELETE']), 'recovery': False},
{'inset': (['EXISTING'], [], []), 'outset': (['EXISTING'], []),
'recovery': True})
@ddt.unpack
def test_update_access(self, inset, outset, recovery):
conf, _layout = self._setup()
gluster_mgr = mock.Mock(name='gluster_mgr')
self.mock_object(_layout, '_share_manager',
mock.Mock(return_value=gluster_mgr))
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
self.mock_object(_driver, '_update_access_via_manager', mock.Mock())
rulemap = {t: fake_access({'access_type': "ip",
'access_level': "rw",
'access_to': t}) for t in (
'EXISTING', 'ADD', 'DELETE')}
in_rules, out_rules = (
[
[
rulemap[t] for t in r
] for r in rs
] for rs in (inset, outset))
_driver.update_access(self.fake_context, self.fake_share, *in_rules)
_layout._share_manager.assert_called_once_with(self.fake_share)
_driver._update_access_via_manager.assert_called_once_with(
gluster_mgr, self.fake_context, self.fake_share,
*out_rules, recovery=recovery)
def test_update_access_via_manager(self):
self.assertRaises(NotImplementedError,
self._driver._update_access_via_manager,
mock.Mock(), self.fake_context, self.fake_share,
[self.fake_access], [self.fake_access])
@ddt.data('NFS', 'PROTATO')
def test_check_proto_baseclass(self, proto):
self.assertRaises(exception.ShareBackendException,
layout.GlusterfsShareDriverBase._check_proto,
{'share_proto': proto})
def test_check_proto(self):
GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'})
def test_check_proto_notsupported(self):
self.assertRaises(exception.ShareBackendException,
GlusterfsFakeShareDriver._check_proto,
{'share_proto': 'PROTATO'})
@ddt.data('', '_from_snapshot')
def test_create_share(self, variant):
conf, _layout = self._setup()
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
self.mock_object(_driver, '_check_proto', mock.Mock())
getattr(_driver, 'create_share%s' % variant)(self.fake_context,
self.fake_share)
_driver._check_proto.assert_called_once_with(self.fake_share)
getattr(_layout,
'create_share%s' % variant).assert_called_once_with(
self.fake_context, self.fake_share)
@ddt.data(True, False)
def test_update_share_stats(self, internal_exception):
data = mock.Mock()
conf, _layout = self._setup()
def raise_exception(*args, **kwargs):
raise NotImplementedError
layoutstats = mock.Mock()
mock_kw = ({'side_effect': raise_exception} if internal_exception
else {'return_value': layoutstats})
self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw))
self.mock_object(driver.ShareDriver, '_update_share_stats',
mock.Mock())
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
_driver._update_share_stats(data)
if internal_exception:
self.assertFalse(data.update.called)
else:
data.update.assert_called_once_with(layoutstats)
driver.ShareDriver._update_share_stats.assert_called_once_with(
data)
@ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot',
'ensure_share', 'manage_existing', 'unmanage', 'extend_share',
'shrink_share')
def test_delegated_methods(self, method):
conf, _layout = self._setup()
_driver = GlusterfsFakeShareDriver(False, configuration=conf)
fake_args = (mock.Mock(), mock.Mock(), mock.Mock())
getattr(_driver, method)(*fake_args)
getattr(_layout, method).assert_called_once_with(*fake_args)
@ddt.ddt
class GlusterfsShareLayoutBaseTestCase(test.TestCase):
"""Tests GlusterfsShareLayoutBaseTestCase."""
def setUp(self):
super(GlusterfsShareLayoutBaseTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._execute = fake_utils.fake_execute
self.addCleanup(fake_utils.fake_execute_set_repliers, [])
self.addCleanup(fake_utils.fake_execute_clear_log)
self.fake_driver = mock.Mock()
self.mock_object(self.fake_driver, '_execute',
self._execute)
class FakeLayout(layout.GlusterfsShareLayoutBase):
def _share_manager(self, share):
"""Return GlusterManager object representing share's backend."""
def do_setup(self, context):
"""Any initialization the share driver does while starting."""
def create_share(self, context, share, share_server=None):
"""Is called to create share."""
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
def create_snapshot(self, context, snapshot, share_server=None):
"""Is called to create snapshot."""
def delete_share(self, context, share, share_server=None):
"""Is called to remove share."""
def delete_snapshot(self, context, snapshot, share_server=None):
"""Is called to remove snapshot."""
def ensure_share(self, context, share, share_server=None):
"""Invoked to ensure that share is exported."""
def manage_existing(self, share, driver_options):
"""Brings an existing share under Manila management."""
def unmanage(self, share):
"""Removes the specified share from Manila management."""
def extend_share(self, share, new_size, share_server=None):
"""Extends size of existing share."""
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
def test_init_invalid(self):
self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
mock.Mock())
def test_subclass(self):
fake_conf = mock.Mock()
_layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
self.assertEqual(fake_conf, _layout.configuration)
self.assertRaises(NotImplementedError, _layout._update_share_stats)
def test_check_mount_glusterfs(self):
fake_conf = mock.Mock()
_driver = mock.Mock()
_driver._execute = mock.Mock()
_layout = self.FakeLayout(_driver, configuration=fake_conf)
_layout._check_mount_glusterfs()
_driver._execute.assert_called_once_with(
'mount.glusterfs',
check_exit_code=False)
@ddt.data({'_errno': errno.ENOENT,
'_exception': exception.GlusterfsException},
{'_errno': errno.EACCES, '_exception': OSError})
@ddt.unpack
def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
fake_conf = mock.Mock()
_layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
def exec_runner(*ignore_args, **ignore_kwargs):
raise OSError(_errno, os.strerror(_errno))
expected_exec = ['mount.glusterfs']
fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)])
self.assertRaises(_exception, _layout._check_mount_glusterfs)
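# Illustrative sketch (added; not part of the original tests): the forwarding
# verified by test_delegated_methods above boils down to the driver proxying
# layout-specific operations to its layout object, roughly:
#
#     def do_setup(self, *args, **kwargs):
#         return self.layout.do_setup(*args, **kwargs)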
| apache-2.0 | 6,427,301,124,843,634,000 | 38.220544 | 79 | 0.612772 | false |
kartikgupta0909/gittest | configs/builds/releng_base_windows_32_builds.py | 2 | 4098 | import os
import sys
STAGE_USERNAME = 'ffxbld'
STAGE_SSH_KEY = 'ffxbld_dsa'
config = {
#########################################################################
    ######## WINDOWS GENERIC CONFIG KEYS/VALUES
    # if you are updating this with custom 32 bit keys/values please add them
    # below under the '32 bit specific' code block; otherwise, update in this
    # code block and also make sure this is synced with
# releng_base_windows_64_builds.py
'default_actions': [
'clobber',
'clone-tools',
        # 'setup-mock',  # Windows does not use mock
'build',
'sendchanges',
'generate-build-stats',
'update', # decided by query_is_nightly()
],
"buildbot_json_path": "buildprops.json",
'exes': {
'python2.7': sys.executable,
'hgtool.py': [
sys.executable,
os.path.join(
os.getcwd(), 'build', 'tools', 'buildfarm', 'utils', 'hgtool.py'
)
],
"buildbot": [
sys.executable,
'c:\\mozilla-build\\buildbotve\\scripts\\buildbot'
],
"make": [
sys.executable,
os.path.join(
os.getcwd(), 'build', 'src', 'build', 'pymake', 'make.py'
)
]
},
'app_ini_path': '%(obj_dir)s/dist/bin/application.ini',
# decides whether we want to use moz_sign_cmd in env
'enable_signing': True,
'purge_skip': ['info', 'rel-*:45d', 'tb-rel-*:45d'],
'purge_basedirs': [],
'enable_ccache': False,
'vcs_share_base': 'C:/builds/hg-shared',
'objdir': 'obj-firefox',
'tooltool_script': [sys.executable,
'C:/mozilla-build/tooltool.py'],
'tooltool_bootstrap': "setup.sh",
'enable_count_ctors': False,
'enable_talos_sendchange': True,
'enable_unittest_sendchange': True,
'platform_supports_partials': True,
#########################################################################
#########################################################################
###### 32 bit specific ######
'base_name': 'WINNT_5.2_%(branch)s',
'platform': 'win32',
'stage_platform': 'win32',
'enable_max_vsize': True,
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'BINSCOPE': 'C:/Program Files (x86)/Microsoft/SDL BinScope/BinScope.exe',
'HG_SHARE_BASE_DIR': 'C:/builds/hg-shared',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'MOZ_OBJDIR': 'obj-firefox',
'PATH': 'C:/mozilla-build/nsis-3.0a2;C:/mozilla-build/nsis-2.46u;C:/mozilla-build/python27;'
'C:/mozilla-build/buildbotve/scripts;'
'%s' % (os.environ.get('path')),
'PDBSTR_PATH': '/c/Program Files (x86)/Windows Kits/8.0/Debuggers/x64/srcsrv/pdbstr.exe',
'PROPERTIES_FILE': os.path.join(os.getcwd(), 'buildprops.json'),
# SYMBOL_SERVER_HOST is dictated from build_pool_specifics.py
'SYMBOL_SERVER_HOST': '%(symbol_server_host)s',
'SYMBOL_SERVER_SSH_KEY': '/c/Users/cltbld/.ssh/ffxbld_dsa',
'SYMBOL_SERVER_USER': 'ffxbld',
'SYMBOL_SERVER_PATH': '/mnt/netapp/breakpad/symbols_ffx/',
'POST_SYMBOL_UPLOAD_CMD': '/usr/local/bin/post-symbol-upload.py',
'TINDERBOX_OUTPUT': '1',
},
'upload_env': {
# UPLOAD_HOST is set to stage_server
# stage_server is dictated from build_pool_specifics.py
'UPLOAD_USER': STAGE_USERNAME,
'UPLOAD_TO_TEMP': '1',
'UPLOAD_SSH_KEY': '~/.ssh/%s' % (STAGE_SSH_KEY,),
},
"check_test_env": {
'MINIDUMP_STACKWALK': '%(abs_tools_dir)s/breakpad/win32/minidump_stackwalk.exe',
'MINIDUMP_SAVE_PATH': '%(base_work_dir)s/minidumps',
},
'purge_minsize': 12,
'src_mozconfig': 'browser/config/mozconfigs/win32/nightly',
'tooltool_manifest_src': "browser/config/tooltool-manifests/win32/releng.manifest",
'platform_ftp_name': 'win32.complete.mar',
#########################################################################
}
| mpl-2.0 | -5,999,572,464,100,952,000 | 38.403846 | 100 | 0.531723 | false |
Neural-Network/TicTacToe | pybrain/datasets/supervised.py | 21 | 4400 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, [email protected]'
from numpy import random
from random import sample
from scipy import isscalar
from pybrain.datasets.dataset import DataSet
from pybrain.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter([[x] for x in iter(self)])
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError/ponderation
def _evaluateSequence(self, f, seq, verbose = False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target-res).flatten()**2)
totalError += e
ponderation += len(target)
if verbose:
                print('out:    ', fListToString(list(res)))
                print('correct:', fListToString(target))
                print('error: % .8f' % e)
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver = 1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res/averageOver
def splitWithProportion(self, proportion = 0.5):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples."""
        indices = random.permutation(len(self))
        separator = int(len(self) * proportion)
        leftIndices = indices[:separator]
        rightIndices = indices[separator:]
        leftDs = SupervisedDataSet(inp=self['input'][leftIndices].copy(),
                                   target=self['target'][leftIndices].copy())
        rightDs = SupervisedDataSet(inp=self['input'][rightIndices].copy(),
                                    target=self['target'][rightIndices].copy())
return leftDs, rightDs
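if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original module). The
    # dimensions and XOR-style samples below are illustrative only.
    ds = SupervisedDataSet(2, 1)      # 2-dim input, 1-dim target
    ds.addSample((0, 0), (0,))
    ds.addSample((0, 1), (1,))
    ds.addSample((1, 0), (1,))
    ds.addSample((1, 1), (0,))
    train, test = ds.splitWithProportion(0.75)
    print(len(train), len(test))      # -> 3 1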
| bsd-3-clause | -5,855,689,213,137,836,000 | 35.97479 | 80 | 0.594091 | false |
sakuraio/python-sakuraio | sakuraio/hardware/commands/operation.py | 1 | 3230 | import struct
import datetime
# Operation
CMD_GET_PRODUCT_ID = 0xA0
CMD_GET_UNIQUE_ID = 0xA1
CMD_GET_FIRMWARE_VERSION = 0xA2
CMD_UNLOCK = 0xA8
CMD_UPDATE_FIRMWARE = 0xA9
CMD_GET_FIRMWARE_UPDATE_STATUS = 0xAA
CMD_SOFTWARE_RESET = 0xAF
CMD_SET_POWER_SAVE_MODE = 0xB0
CMD_GET_POWER_SAVE_MODE = 0xB1
UNLOCK_MAGIC_NUMBERS = [0x53, 0x6B, 0x72, 0x61]
PRODUCT_ID_SCM_LTE_BETA = 0x01
PRODUCT_ID_SCM_LTE_01 = 0x02
PRODUCT_ID_MAP = {
PRODUCT_ID_SCM_LTE_BETA: "SCM-LTE-BETA",
PRODUCT_ID_SCM_LTE_01: "SCM-LTE-01",
}
POWER_SAVE_MODE_DISABLE = 0
POWER_SAVE_MODE_AUTO_SLEEP = 1
POWER_SAVE_MODE_RF_OFF = 2
class OperationMixins(object):
def get_product_id(self):
"""Get product id
:return: Product ID. Possible values:
:const:`PRODUCT_ID_SCM_LTE_BETA`, :const:`PRODUCT_ID_SCM_LTE_01`
:rtype: int
"""
response = self.execute_command(CMD_GET_PRODUCT_ID, as_bytes=True)
product_id = struct.unpack("<H", response)[0]
return product_id
def get_product_name(self):
"""Get product name
:return: Product name. Possible values: ``"SCM-LTE-BETA"``, ``"SCM-LTE-01"``.
:rtype: str
"""
product_id = self.get_product_id()
return PRODUCT_ID_MAP.get(product_id, "{0:04X}".format(product_id))
def get_unique_id(self):
"""Get unique id
        :return: Unique ID. For example ``"16X0000001"``.
:rtype: str
"""
return self.execute_command(CMD_GET_UNIQUE_ID, as_bytes=True).decode("ascii")
def get_firmware_version(self):
"""Get firmware version
        :return: Firmware version. For example ``"v1.1.2-170223-7e6ce64"``.
:rtype: str
"""
return self.execute_command(CMD_GET_FIRMWARE_VERSION, as_bytes=True).decode("ascii")
def unlock(self):
"""Unlock critical command"""
self.execute_command(CMD_UNLOCK, UNLOCK_MAGIC_NUMBERS)
def update_firmware(self):
"""Request to update firmware"""
self.execute_command(CMD_UPDATE_FIRMWARE)
def get_firmware_update_status(self):
"""Get firmware update status
:return: Status.
:rtype: dict
"""
response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]
inprogress = (response & 0x80) == 0x80
return {
"inprogress": inprogress,
"error": response & 0x7f,
}
def reset(self):
"""Request software reset"""
self.execute_command(CMD_SOFTWARE_RESET)
def set_power_save_mode(self, mode):
"""Set PowerSaveMode
:param integer mode:
Power Save Mode number.
0: Disable mode.
1: Auto sleep mode.
2: Rf off mode.
"""
self.execute_command(CMD_SET_POWER_SAVE_MODE, [mode])
def get_power_save_mode(self):
"""Get PowerSaveMode
:rtype: int
Power Save Mode number.
0: Disable mode.
1: Auto sleep mode.
2: Rf off mode.
"""
ret = self.execute_command(CMD_GET_POWER_SAVE_MODE)
if isinstance(ret, list):
if len(ret) == 1:
return ret[0]
return None
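if __name__ == '__main__':
    # Minimal sketch (added; not part of the original module): OperationMixins
    # expects the host class to provide execute_command(), which normally
    # talks to the module over I2C/SPI. The in-memory stub below is
    # hypothetical, and its signature is an assumption for this sketch; it
    # only answers the product-ID command.
    class _StubBus(OperationMixins):
        def execute_command(self, cmd, data=None, as_bytes=False):
            if cmd == CMD_GET_PRODUCT_ID:
                return struct.pack('<H', PRODUCT_ID_SCM_LTE_01)
            raise NotImplementedError(hex(cmd))

    bus = _StubBus()
    print(bus.get_product_name())  # -> "SCM-LTE-01"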
| mit | -1,776,822,353,497,591,000 | 24.84 | 92 | 0.588235 | false |
aviweit/libcloud | libcloud/common/brightbox.py | 55 | 3413 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
try:
import simplejson as json
except ImportError:
import json
class BrightboxResponse(JsonResponse):
def success(self):
return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST
def parse_body(self):
if self.headers['content-type'].split(';')[0] == 'application/json':
return super(BrightboxResponse, self).parse_body()
else:
return self.body
def parse_error(self):
response = super(BrightboxResponse, self).parse_body()
if 'error' in response:
if response['error'] in ['invalid_client', 'unauthorized_client']:
raise InvalidCredsError(response['error'])
return response['error']
elif 'error_name' in response:
return '%s: %s' % (response['error_name'], response['errors'][0])
return self.body
class BrightboxConnection(ConnectionUserAndKey):
"""
Connection class for the Brightbox driver
"""
host = 'api.gb1.brightbox.com'
responseCls = BrightboxResponse
def _fetch_oauth_token(self):
body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})
        # b64encode (unlike str() around base64.encodestring()) yields a clean
        # ASCII token on both Python 2 and 3
        authorization = 'Basic ' + base64.b64encode(
            b('%s:%s' % (self.user_id, self.key))).decode('utf-8')
self.connect()
headers = {
'Host': self.host,
'User-Agent': self._user_agent(),
'Authorization': authorization,
'Content-Type': 'application/json',
'Content-Length': str(len(body))
}
response = self.connection.request(method='POST', url='/token',
body=body, headers=headers)
response = self.connection.getresponse()
if response.status == httplib.OK:
return json.loads(response.read())['access_token']
else:
responseCls = BrightboxResponse(response=response, connection=self)
message = responseCls.parse_error()
raise InvalidCredsError(message)
def add_default_headers(self, headers):
try:
headers['Authorization'] = 'OAuth ' + self.token
except AttributeError:
self.token = self._fetch_oauth_token()
headers['Authorization'] = 'OAuth ' + self.token
return headers
def encode_data(self, data):
return json.dumps(data)
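# Usage sketch (added; not part of the original module; the credentials are
# placeholders): the OAuth token is fetched lazily on the first request and
# cached on the connection for subsequent ones.
#
#     conn = BrightboxConnection('client-id', 'client-secret')
#     headers = conn.add_default_headers({})   # first call fetches the token
#     # later calls reuse conn.token without hitting /token again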
| apache-2.0 | 8,668,437,354,898,299,000 | 32.792079 | 79 | 0.64225 | false |
growingio/phoenix | bin/end2endTest.py | 31 | 1881 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
# !!! PLEASE READ !!!
# !!! Do NOT run the script against a prodcution cluster because it wipes out
# !!! existing data of the cluster
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
phoenix_jar_path = os.getenv(phoenix_utils.phoenix_class_path, phoenix_utils.phoenix_test_jar_path)
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_library_path = os.getenv('HBASE_LIBRARY_DIR', '')
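# Illustrative invocation (added; hypothetical path, not from the original
# script). Any extra command-line arguments are forwarded verbatim to the
# Java End2EndTestDriver assembled below:
#
#     HBASE_LIBRARY_DIR=/opt/hbase/lib ./end2endTest.py <End2EndTestDriver args>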
print "Current ClassPath=%s:%s:%s" % (phoenix_utils.hbase_conf_dir, phoenix_jar_path,
hbase_library_path)
java_cmd = "java -cp " + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_jar_path + os.pathsep + \
hbase_library_path + " org.apache.phoenix.end2end.End2EndTestDriver " + \
' '.join(sys.argv[1:])
exitcode = subprocess.call(java_cmd, shell=True)
sys.exit(exitcode)
| apache-2.0 | -1,432,413,328,038,505,200 | 39.021277 | 102 | 0.671983 | false |
torufuru/oolhackathon | ryu/lib/xflow/netflow.py | 60 | 4009 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
NETFLOW_V1 = 0x01
NETFLOW_V5 = 0x05
NETFLOW_V6 = 0x06
NETFLOW_V7 = 0x07
NETFLOW_V8 = 0x08
NETFLOW_V9 = 0x09
class NetFlow(object):
_PACK_STR = '!H'
_NETFLOW_VERSIONS = {}
@staticmethod
def register_netflow_version(version):
def _register_netflow_version(cls):
NetFlow._NETFLOW_VERSIONS[version] = cls
return cls
return _register_netflow_version
def __init__(self):
super(NetFlow, self).__init__()
@classmethod
def parser(cls, buf):
(version,) = struct.unpack_from(cls._PACK_STR, buf)
cls_ = cls._NETFLOW_VERSIONS.get(version, None)
if cls_:
return cls_.parser(buf)
else:
return None
@NetFlow.register_netflow_version(NETFLOW_V5)
class NetFlowV5(object):
_PACK_STR = '!HHIIIIBBH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, version, count, sys_uptime, unix_secs,
unix_nsecs, flow_sequence, engine_type, engine_id,
sampling_interval, flows=None):
self.version = version
self.count = count
self.sys_uptime = sys_uptime
self.unix_secs = unix_secs
self.unix_nsecs = unix_nsecs
self.flow_sequence = flow_sequence
self.engine_type = engine_type
self.engine_id = engine_id
self.sampling_interval = sampling_interval
@classmethod
def parser(cls, buf):
(version, count, sys_uptime, unix_secs, unix_nsecs,
flow_sequence, engine_type, engine_id, sampling_interval) = \
struct.unpack_from(cls._PACK_STR, buf)
msg = cls(version, count, sys_uptime, unix_secs, unix_nsecs,
flow_sequence, engine_type, engine_id,
sampling_interval)
offset = cls._MIN_LEN
msg.flows = []
while len(buf) > offset:
f = NetFlowV5Flow.parser(buf, offset)
offset += NetFlowV5Flow._MIN_LEN
msg.flows.append(f)
return msg
class NetFlowV5Flow(object):
_PACK_STR = '!IIIHHIIIIHHxBBBHHBB2x'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, srcaddr, dstaddr, nexthop, input_, output,
dpkts, doctets, first, last, srcport, dstport,
tcp_flags, prot, tos, src_as, dst_as, src_mask,
dst_mask):
self.srcaddr = srcaddr
self.dstaddr = dstaddr
self.nexthop = nexthop
self.input = input_
self.output = output
self.dpkts = dpkts
self.doctets = doctets
self.first = first
self.last = last
self.srcport = srcport
self.dstport = dstport
self.tcp_flags = tcp_flags
self.prot = prot
self.tos = tos
self.src_as = src_as
self.dst_as = dst_as
self.src_mask = src_mask
self.dst_mask = dst_mask
@classmethod
def parser(cls, buf, offset):
(srcaddr, dstaddr, nexthop, input_, output, dpkts, doctets,
first, last, srcport, dstport, tcp_flags, prot, tos, src_as,
dst_as, src_mask, dst_mask) = struct.unpack_from(
cls._PACK_STR, buf, offset)
msg = cls(srcaddr, dstaddr, nexthop, input_, output, dpkts,
doctets, first, last, srcport, dstport, tcp_flags,
prot, tos, src_as, dst_as, src_mask, dst_mask)
return msg
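if __name__ == '__main__':
    # Minimal self-test (added; not part of the original module): build one
    # synthetic NetFlow v5 export containing a single 48-byte flow record and
    # parse it back. All field values below are arbitrary examples.
    _header = struct.pack(NetFlowV5._PACK_STR,
                          5, 1, 1000, 1609459200, 0,  # version..unix_nsecs
                          1, 0, 0, 0)                 # flow_sequence..sampling
    _flow = struct.pack(NetFlowV5Flow._PACK_STR,
                        0x0a000001, 0x0a000002, 0,    # srcaddr, dstaddr, nexthop
                        1, 2,                         # input, output ifindex
                        10, 1500, 100, 900,           # dpkts, doctets, first, last
                        12345, 80,                    # srcport, dstport
                        0x10, 6, 0,                   # tcp_flags, prot, tos
                        65000, 65001, 24, 24)         # src_as, dst_as, masks
    _msg = NetFlow.parser(_header + _flow)
    assert _msg.version == 5 and _msg.flows[0].dstport == 80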
| apache-2.0 | 208,989,595,103,367,400 | 31.072 | 70 | 0.607383 | false |
bq-xiao/mongo-python-driver | test/test_replica_set_reconfig.py | 18 | 5751 | # Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test clients and replica set configuration changes, using mocks."""
import sys
sys.path[0:0] = [""]
from pymongo.errors import ConnectionFailure, AutoReconnect
from pymongo import ReadPreference
from test import unittest, client_context, client_knobs, MockClientTest
from test.pymongo_mocks import MockClient
from test.utils import wait_until
@client_context.require_connection
def setUpModule():
pass
class TestSecondaryBecomesStandalone(MockClientTest):
# An administrator removes a secondary from a 3-node set and
# brings it back up as standalone, without updating the other
# members' config. Verify we don't continue using it.
def test_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs',
serverSelectionTimeoutMS=100)
# MongoClient connects to primary by default.
wait_until(lambda: c.address is not None, 'connect to primary')
self.assertEqual(c.address, ('a', 1))
# C is brought up as a standalone.
c.mock_members.remove('c:3')
c.mock_standalones.append('c:3')
# Fail over.
c.kill_host('a:1')
c.kill_host('b:2')
# Force reconnect.
c.close()
with self.assertRaises(AutoReconnect):
c.db.command('ismaster')
self.assertEqual(c.address, None)
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs')
wait_until(lambda: ('b', 2) in c.secondaries,
'discover host "b"')
wait_until(lambda: ('c', 3) in c.secondaries,
'discover host "c"')
# C is brought up as a standalone.
c.mock_members.remove('c:3')
c.mock_standalones.append('c:3')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'update the list of secondaries')
self.assertEqual(('a', 1), c.primary)
class TestSecondaryRemoved(MockClientTest):
# An administrator removes a secondary from a 3-node set *without*
# restarting it as standalone.
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='a:1,b:2,c:3',
replicaSet='rs')
wait_until(lambda: ('b', 2) in c.secondaries, 'discover host "b"')
wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"')
# C is removed.
c.mock_ismaster_hosts.remove('c:3')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'update list of secondaries')
self.assertEqual(('a', 1), c.primary)
class TestSocketError(MockClientTest):
def test_socket_error_marks_member_down(self):
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')
# b now raises socket.error.
c.mock_down_hosts.append('b:2')
self.assertRaises(
ConnectionFailure,
c.db.collection.with_options(
read_preference=ReadPreference.SECONDARY).find_one)
self.assertEqual(1, len(c.nodes))
class TestSecondaryAdded(MockClientTest):
def test_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 2, 'discover both nodes')
# MongoClient connects to primary by default.
self.assertEqual(c.address, ('a', 1))
self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes)
# C is added.
c.mock_members.append('c:3')
c.mock_ismaster_hosts.append('c:3')
c.close()
c.db.command('ismaster')
self.assertEqual(c.address, ('a', 1))
wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes,
'reconnect to both secondaries')
def test_replica_set_client(self):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs')
wait_until(lambda: ('a', 1) == c.primary, 'discover the primary')
wait_until(lambda: set([('b', 2)]) == c.secondaries,
'discover the secondary')
# C is added.
c.mock_members.append('c:3')
c.mock_ismaster_hosts.append('c:3')
wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries,
'discover the new secondary')
self.assertEqual(('a', 1), c.primary)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -5,490,628,702,185,979,000 | 30.086486 | 74 | 0.56738 | false |
rbooth200/DiscEvolution | DiscEvolution/internal_photo.py | 1 | 30463 | # internal_photo.py
#
# Author: A. Sellek
# Date: 12 - Aug - 2020
#
# Implementation of Photoevaporation Models
################################################################################
import numpy as np
import argparse
import json
import matplotlib.pyplot as plt
from DiscEvolution.constants import *
from DiscEvolution.star import PhotoStar
from scipy.signal import argrelmin
class NotHoleError(Exception):
"""Raised if finds an outer edge, not a hole"""
pass
class PhotoBase():
def __init__(self, disc, Regime=None, Type=None):
# Basic mass loss properties
self._regime = Regime # EUV or X-ray
self._type = Type # 'Primordial' or 'InnerHole'
self._Sigmadot = np.zeros_like(disc.R)
self.mdot_XE(disc.star)
# Evolutionary state flags
self._Hole = False # Has the hole started to open?
self._reset = False # Have we needed to reset a decoy hole?
self._empty = False # When no longer a valid hole radius or all below density threshold
self._Thin = False # Is the hole exposed (ie low column density to star)?
# Parameters of hole
self._R_hole = None
self._N_hole = None
# The column density threshold below which the inner disc is "Thin"
if self._regime=='X-ray':
self._N_crit = 1e22
elif self._regime=='EUV':
self._N_crit = 1e18
else:
self._N_crit = 0.0 # (if 0, can never switch)
# Outer radius
self._R_out = max(disc.R_edge)
def mdot_XE(self, star, Mdot=0):
# Generic wrapper for initiating X-ray or EUV mass loss
# Without prescription, mass loss is 0
self._Mdot = Mdot
self._Mdot_true = Mdot
def Sigma_dot(self, R, star):
if self._type=='Primordial':
self.Sigma_dot_Primordial(R, star)
elif self._type=='InnerHole':
self.Sigma_dot_InnerHole(R, star)
def Sigma_dot_Primordial(self, R, star, ret=False):
# Without prescription, mass loss is 0
if ret:
return np.zeros(len(R)+1)
else:
self._Sigmadot = np.zeros_like(R)
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Without prescription, mass loss is 0
if ret:
return np.zeros(len(R)+1)
else:
self._Sigmadot = np.zeros_like(R)
def scaled_R(self, R, star):
# Prescriptions may rescale the radius variable
# Without prescription, radius is unscaled
return R
def R_inner(self, star):
# Innermost mass loss
return 0
def check_dt(self, disc, dt):
# Work out the timescale to clear cell
where_photoevap = (self.dSigmadt > 0)
t_w = np.full_like(disc.R,np.inf)
t_w[where_photoevap] = disc.Sigma_G[where_photoevap] / self.dSigmadt[where_photoevap]
# Return minimum value for cells inside outer edge
indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region.
try:
imin = argrelmin(t_w[indisc])[0][0] # Find local minima in clearing time, neglecting outer edge where tails off. Take first to avoid solutions due to noise in dusty outskirts
except IndexError: # If no local minimum, try to find hole as wherever the min is.
imin = np.argmin(t_w[indisc])
# Check against timestep and report
        if (dt > t_w[indisc][imin]): # If an entire cell can deplete (imin indexes t_w[indisc])
#if not self._Hole:
# print("Alert - hole can open after this timestep at {:.2f} AU".format(disc.R[imin]))
# print("Outer radius is currently {:.2f} AU".format(self._R_out))
self._Hole = True # Set hole flag
        return t_w[indisc][imin]
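    # Note (added for clarity, not in the original source): the clearing time
    # computed above is t_w = Sigma_G / (dSigma/dt) per cell; the hole flag is
    # raised once the timestep dt exceeds the smallest t_w in the mass-losing
    # region interior to the disc's outer edge.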
def remove_mass(self, disc, dt, external_photo=None):
# Find disc "outer edge" so we can apply mass loss only inside
if external_photo:
self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence
else:
self._R_out = disc.Rout(thresh=1e-10)
if disc.Rout()==0.0:
print("Disc everywhere below density threshold. Declare Empty.")
self._empty = True
# Check whether hole can open
        if not self._Hole:
self.check_dt(disc, dt)
# Determine mass loss
dSigma = np.minimum(self.dSigmadt * dt, disc.Sigma_G) # Limit mass loss to density of cell
dSigma *= (disc.R < self._R_out) # Only apply mass loss inside disc outer edge
# Apply, preserving the dust mass
if hasattr(disc, 'Sigma_D'):
Sigma_D = disc.Sigma_D # Save the dust density
disc._Sigma -= dSigma
if hasattr(disc, 'Sigma_D'):
dusty = Sigma_D.sum(0)>0
disc.dust_frac[:,dusty] = np.fmin(Sigma_D[:,dusty]/disc.Sigma[dusty],disc.dust_frac[:,dusty]/disc.dust_frac.sum(0)[dusty])
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0) # Renormalise to 1 if it exceeds
# Calculate actual mass loss given limit
if dt>0:
dM = 2*np.pi * disc.R * dSigma
self._Mdot_true = np.trapz(dM,disc.R) / dt * AU**2 / Msun
def get_Rhole(self, disc, external_photo=None):
"""Deal with calls when there is no hole"""
if not self._Hole:
print("No hole for which to get radius. Ignoring command and returning nans.")
return np.nan, np.nan
"""Otherwise continue on to find hole
First find outer edge of disc - hole must be inside this"""
if external_photo:
self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence
else:
self._R_out = disc.Rout(thresh=1e-10)
where_photoevap = (self.dSigmadt > 0)
indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region.
empty_indisc = (disc.Sigma_G <= 1e-10) * indisc # Consider empty if below 10^-10 g/cm^2
try:
if np.sum(empty_indisc) == 0: # If none in disc are empty
minima = argrelmin(disc.Sigma_G)
if len(minima[0]) > 0:
i_hole_out = minima[0][0] # Position of hole is minimum density
else: # No empty cells anymore - disc has cleared to outside
raise NotHoleError
else:
# First find the inner edge of the innermost hole
i_hole_in = np.nonzero(empty_indisc)[0][0]
# The hole cell is defined as the one inside the first non-empty cell outside the inner edge of the hole
outer_disc = ~empty_indisc * (disc.R>disc.R_edge[i_hole_in])
if np.sum(outer_disc) > 0:
i_hole_out = np.nonzero(outer_disc)[0][0] - 1
else: # No non-empty cells outside this - this is not a hole, but an outer edge.
raise NotHoleError
if i_hole_out == np.nonzero(indisc)[0][-1]: # This is not a hole, but the outermost photoevaporating cell
raise NotHoleError
"""If hole position drops by an order of magnitude, it is likely that the previous was really the clearing of low surface density material in the outer disc, so reset"""
if self._R_hole:
R_old = self._R_hole
if disc.R_edge[i_hole_out+1]/R_old<0.1:
self._reset = True
"""If everything worked, update hole properties"""
if not self._R_hole:
print("Hole opened at {:.2f} AU".format(disc.R_edge[i_hole_out+1]))
self._R_hole = disc.R_edge[i_hole_out+1]
self._N_hole = disc.column_density[i_hole_out]
# Test whether Thin
if (self._N_hole < self._N_crit):
self._Thin = True
except NotHoleError:
"""Potential hole isn't a hole but an outer edge"""
if self._type == 'Primordial':
self._Hole = False
self._reset = True
if self._R_hole:
print("No hole found")
print("Last known location {} AU".format(self._R_hole))
return 0, 0
elif self._type == 'InnerHole':
if not self._empty:
print("Transition Disc has cleared to outside")
self._empty = True
# Proceed as usual to report but without update
# Save state if tracking
return self._R_hole, self._N_hole
@property
def Mdot(self):
return self._Mdot
@property
def dSigmadt(self):
return self._Sigmadot
def __call__(self, disc, dt, external_photo=None):
# For inner hole discs, need to update the hole radius and then the mass-loss as the normalisation changes based on R, not just x~R-Rhole.
if self._type=='InnerHole':
self.get_Rhole(disc)
self.Sigma_dot(disc.R_edge, disc.star)
# Remove the mass
self.remove_mass(disc,dt, external_photo)
# Check for new holes
if self._Hole and not self._Thin: # If there is a hole but the inner disc is not already optically thin, update its properties
R_hole, N_hole = self.get_Rhole(disc, external_photo)
# Check if hole is now large enough that inner disc optically thin, switch internal photoevaporation to direct field if so
if self._Thin:
print("Column density to hole has fallen to N = {} < {} g cm^-2".format(N_hole,self._N_crit))
self._type = 'InnerHole'
# Run the mass loss rates to update the table
self.mdot_XE(disc.star)
self.Sigma_dot(disc.R_edge, disc.star)
# Report
print("At initiation of InnerHole Type, M_D = {} M_J, Mdot = {}, t_clear ~ {} yr".format(disc.Mtot()/Mjup, self._Mdot, disc.Mtot()/Msun/self._Mdot))
def ASCII_header(self):
return ("# InternalEvaporation, Type: {}, Mdot: {}"
"".format(self._type+self.__class__.__name__,self._Mdot))
def HDF5_attributes(self):
header = {}
header['Type'] = self._type+"/"+self._regime
header['Mdot'] = '{}'.format(self._Mdot)
return self.__class__.__name__, header
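# Editor's note (hedged sketch, not part of the original module): a PhotoBase
# subclass is driven once per timestep through __call__, which updates the profile
# for InnerHole discs, removes mass, and promotes the disc from 'Primordial' to
# 'InnerHole' once the column density to the hole drops below _N_crit. A minimal
# driver loop, assuming `disc` and `dt` come from the surrounding simulation:
#
#   wind = XrayDiscOwen(disc)
#   for _ in range(n_steps):
#       wind(disc, dt)          # dt in yr; mass loss is capped at Sigma_G per cell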
#################################################################################
"""""""""
X-ray dominated photoevaporation
-Following prescription of Owen, Ercolano and Clarke (2012)
-Following prescription of Picogna, Ercolano, Owen and Weber (2019)
"""""""""
#################################################################################
"""Owen, Ercolano and Clarke (2012)"""
class XrayDiscOwen(PhotoBase):
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='X-ray', Type=Type)
# Parameters for Primordial mass loss profile
self._a1 = 0.15138
self._b1 = -1.2182
self._c1 = 3.4046
self._d1 = -3.5717
self._e1 = -0.32762
self._f1 = 3.6064
self._g1 = -2.4918
# Parameters for Inner Hole mass loss profile
self._a2 = -0.438226
self._b2 = -0.10658387
self._c2 = 0.5699464
self._d2 = 0.010732277
self._e2 = -0.131809597
self._f2 = -1.32285709
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=None):
# In Msun/yr
if Mdot is not None:
self._Mdot = Mdot
elif self._type=='Primordial':
self._Mdot = 6.25e-9 * star.M**(-0.068) * (star.L_X / 1e30)**(1.14) # Equation B1
elif self._type=='InnerHole':
self._Mdot = 4.8e-9 * star.M**(-0.148) * (star.L_X / 1e30)**(1.14) # Equation B4
else:
raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified")
self._Mdot_true = self._Mdot
def scaled_R(self, R, star):
# Where R in AU
x = 0.85 * R / star.M # Equation B3
if self._Hole:
y = 0.95 * (R-self._R_hole) / star.M # Equation B6
else:
y = R
return x, y
def R_inner(self, star):
# Innermost mass loss
return 0.7 / 0.85 * star.M
def Sigma_dot_Primordial(self, R, star, ret=False):
# Equation B2
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (x >= 0.7) * (x<=99) # No mass loss close to star, mass loss prescription becomes negative at log10(x)=1.996
logx = np.log(x[where_photoevap])
log10 = np.log(10)
log10x = logx/log10
# First term
exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1
t1 = 10**exponent
# Second term
terms = 6*self._a1*logx**5/log10**7 + 5*self._b1*logx**4/log10**6 + 4*self._c1*logx**3/log10**5 + 3*self._d1*logx**2/log10**4 + 2*self._e1*logx/log10**3 + self._f1/log10**2
t2 = terms/x[where_photoevap]**2
# Third term
t3 = np.exp(-(x[where_photoevap]/100)**10)
# Combine terms
Sigmadot[where_photoevap] = t1 * t2 * t3
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Equation B5
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (y >= 0.0) # No mass loss inside hole
use_y = y[where_photoevap]
# Exponent of second term
exp2 = -(use_y/57)**10
# Numerator
terms = self._a2*self._b2 * np.exp(self._b2*use_y+exp2) + self._c2*self._d2 * np.exp(self._d2*use_y+exp2) + self._e2*self._f2 * np.exp(self._f2*use_y+exp2)
# Divide by Denominator
Sigmadot[where_photoevap] = terms/R[where_photoevap]
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
# Mopping up in the gap
mop_up = (x >= 0.7) * (y < 0.0)
Sigmadot[mop_up] = np.inf
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
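# Editor's addition: a hedged, runnable sketch (not in the original) showing how the
# Owen+12 wind is typically driven; `disc` is assumed to expose R, R_edge, Sigma_G
# and a PhotoStar with L_X set, as elsewhere in this module.
def _example_owen_wind(disc, dt=1e3, n_steps=10):
    wind = XrayDiscOwen(disc)       # Primordial disc by default
    for _ in range(n_steps):
        wind(disc, dt)              # removes up to Sigma_G per cell each step
    return wind.Mdot                # normalisation rate in Msun/yr (Equation B1/B4)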
"""Picogna, Ercolano, Owen and Weber (2019)"""
class XrayDiscPicogna(PhotoBase):
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='X-ray', Type=Type)
# Parameters for Primordial mass loss profile
self._a1 = -0.5885
self._b1 = 4.3130
self._c1 = -12.1214
self._d1 = 16.3587
self._e1 = -11.4721
self._f1 = 5.7248
self._g1 = -2.8562
# Parameters for Inner Hole mass loss profile
self._a2 = 0.11843
self._b2 = 0.99695
self._c2 = 0.48835
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=None):
# In Msun/yr
if Mdot is not None:
self._Mdot = Mdot
elif self._type=='Primordial':
logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # Equation 5
self._Mdot = 10**logMd
elif self._type=='InnerHole':
logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # 1.12 * Equation 5
self._Mdot = 1.12 * (10**logMd)
else:
raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified")
self._Mdot_true = self._Mdot
def scaled_R(self, R, star):
# Where R in AU
# All are divided by stellar mass normalised to 0.7 Msun (value used by Picogna+19) to represent rescaling by gravitational radius
x = R / (star.M/0.7)
if self._Hole:
y = (R-self._R_hole) / (star.M/0.7) # Equation B6
else:
y = R / (star.M/0.7)
return x, y
def R_inner(self, star):
# Innermost mass loss
if self._type=='Primordial':
return 0 # Mass loss possible throughout
elif self._type=='InnerHole':
return self._R_hole # Mass loss profile applies outside hole
else:
return 0 # If unspecified, assume mass loss possible throughout
def Sigma_dot_Primordial(self, R, star, ret=False):
# Equation B2
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
where_photoevap = (x<=137) # Mass loss prescription becomes negative at x=1.3785
logx = np.log(x[where_photoevap])
log10 = np.log(10)
log10x = logx/log10
# First term
exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1
t1 = 10**exponent
# Second term
terms = 6*self._a1*log10x**5 + 5*self._b1*log10x**4 + 4*self._c1*log10x**3 + 3*self._d1*log10x**2 + 2*self._e1*log10x + self._f1
t2 = terms/(2*np.pi*x[where_photoevap]**2)
# Combine terms
Sigmadot[where_photoevap] = t1 * t2
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
# Equation B5
Sigmadot = np.zeros_like(R)
x, y = self.scaled_R(R,star)
        where_photoevap = (y > 0.0) * (y < -self._c2/np.log(self._b2)) # No mass loss inside hole; profile becomes negative at y=-c/ln(b)
use_y = y[where_photoevap]
# Numerator
terms = self._a2 * np.power(self._b2,use_y) * np.power(use_y,self._c2-1) * (use_y * np.log(self._b2) + self._c2)
# Divide by Denominator
Sigmadot[where_photoevap] = terms/(2*np.pi*R[where_photoevap])
# Work out total mass loss rate for normalisation
M_dot = 2*np.pi * R * Sigmadot
total = np.trapz(M_dot,R)
# Normalise, convert to cgs
Sigmadot = np.maximum(Sigmadot,0)
Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr
# Mopping up in the gap - assume usual primordial rates there.
Sigmadot[(y<=0.0) * (x<=137)] = self.Sigma_dot_Primordial(R, star, ret=True)[(y<=0.0)*(x<=137)]/1.12 # divide by 1.12 so that normalise to correct mass loss rate
mop_up = (x > 137) * (y < 0.0)
Sigmadot[mop_up] = np.inf # Avoid having discontinuous mass-loss by filling in the rest
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
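# Editor's worked check (not in the original): for L_X = 1e30 erg/s, Equation 5 gives
#   ln(log10 L_X) = ln(30) ~ 3.4012, so
#   log10(Mdot) = -2.7326*exp((3.4012 - 3.3307)**2 / -2.9868e-3) - 7.2580 ~ -7.78,
# i.e. Mdot ~ 1.7e-8 Msun/yr for a Primordial disc (x1.12 once an inner hole opens),
# in line with the rates reported by Picogna et al. (2019).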
#################################################################################
"""""""""
EUV dominated photoevaporation
-Following prescription given in Alexander and Armitage (2007)
and based on Font, McCarthy, Johnstone and Ballantyne (2004) for Primordial Discs
and based on Alexander, Clarke and Pringle (2006) for Inner Hole Discs
"""""""""
#################################################################################
class EUVDiscAlexander(PhotoBase):
def __init__(self, disc, Type='Primordial', R_hole=None):
super().__init__(disc, Regime='EUV', Type=Type)
# Parameters for mass loss profiles
self._cs = 10 # Sound speed in km s^-1
self._RG = disc.star.M / (self._cs*1e5 /Omega0/AU)**2 # Gravitational Radius in AU
self._mu = 1.35
self._aB = 2.6e-13 # Case B Recombination coeff. in cm^3 s^-1
self._C1 = 0.14
self._A = 0.3423
self._B = -0.3612
self._D = 0.2457
self._C2 = 0.235
self._a = 2.42
h = disc.H/disc.R
he = np.empty_like(disc.R_edge)
        he[1:-1] = 0.5*(h[1:] + h[:-1]) # Interpolate cell-centre aspect ratios onto interior edges
he[0] = 1.5*h[0] - 0.5*h[1]
he[-1] = 1.5*h[-1] - 0.5*h[-2]
self._h = he
# If initiating with an Inner Hole disc, need to update properties
if self._type == 'InnerHole':
self._Hole = True
self._R_hole = R_hole
#self.get_Rhole(disc)
# Run the mass loss rates to update the table
self.Sigma_dot(disc.R_edge, disc.star)
def mdot_XE(self, star, Mdot=0):
# Store Mdot calculated from profile
self._Mdot = Mdot # In Msun/yr
self._Mdot_true = self._Mdot
def scaled_R(self, R, star):
if self._type=='Primordial':
return R / self._RG # Normalise to RG
elif self._type=='InnerHole':
return R / self.R_inner() # Normalise to inner edge
else:
return R # If unspecified, don't modify
def R_inner(self):
# Innermost mass loss
if self._type=='Primordial':
return 0.1 * self._RG # Mass loss profile is only positive for >0.1 RG
elif self._type=='InnerHole':
return self._R_hole # Mass loss profile applies outside hole
else:
return 0 # If unspecified, assume mass-loss possible throughout
def Sigma_dot_Primordial(self, R, star, ret=False):
Sigmadot = np.zeros_like(R)
x = self.scaled_R(R,star)
where_photoevap = (x >= 0.1) # No mass loss close to star
# Equation A3
nG = self._C1 * (3 * star.Phi / (4*np.pi * (self._RG*AU)**3 * self._aB))**(1/2) # cm^-3
# Equation A2
n0 = nG * (2 / (x**7.5 + x**12.5))**(1/5)
# Equation A4
u1 = self._cs*1e5*yr/Omega0 * self._A * np.exp(self._B * (x-0.1)) * (x-0.1)**self._D # cm yr^-1
# Combine terms (Equation A1)
Sigmadot[where_photoevap] = 2 * self._mu * m_H * (n0 * u1)[where_photoevap] # g cm^-2 /yr
Sigmadot = np.maximum(Sigmadot,0)
# Work out total mass loss rate
dMdot = 2*np.pi * R * Sigmadot
Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2
# Normalise, convert to cgs
Mdot = Mdot * AU**2/Msun # g yr^-1
# Store result
self.mdot_XE(star, Mdot=Mdot)
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
def Sigma_dot_InnerHole(self, R, star, ret=False):
Sigmadot = np.zeros_like(R)
x = self.scaled_R(R,star)
where_photoevap = (x > 1) # No mass loss inside hole
# Combine terms (Equation A5)
Sigmadot[where_photoevap] = (2 * self._mu * m_H * self._C2 * self._cs*1e5*yr/Omega0 * (star.Phi / (4*np.pi * (self.R_inner()*AU)**3 * self._aB * self._h))**(1/2) * x**(-self._a))[where_photoevap] # g cm^-2 /yr
Sigmadot = np.maximum(Sigmadot,0)
# Work out total mass loss rate
dMdot = 2*np.pi * R * Sigmadot
Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2
# Normalise, convert to cgs
Mdot = Mdot * AU**2/Msun # g yr^-1
# Store result
self.mdot_XE(star, Mdot=Mdot)
# Mopping up in the gap
mop_up = (R >= 0.1 * self._RG) * (x <= 1.0)
Sigmadot[mop_up] = np.inf
if ret:
# Return unaveraged values at cell edges
return Sigmadot
else:
# Store values as average of mass loss rate at cell edges
self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2
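# Editor's worked check (not in the original): with the fiducial cs = 10 km s^-1 and
# a 1 Msun star, the gravitational radius is R_G = G M / cs^2
#   ~ 1.33e26 cm^3 s^-2 / (1e6 cm s^-1)^2 ~ 1.33e14 cm ~ 8.9 AU,
# so the Primordial profile, non-zero only for R > 0.1 R_G, switches on near ~0.9 AU,
# consistent with Font et al. (2004).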
#################################################################################
"""""""""
Functions for running as main
Designed for plotting to test things out
"""""""""
#################################################################################
class DummyDisc(object):
def __init__(self, R, star, MD=10, RC=100):
self._M = MD * Mjup
self.Rc = RC
self.R_edge = R
self.R = 0.5*(self.R_edge[1:]+self.R_edge[:-1])
self._Sigma = self._M / (2 * np.pi * self.Rc * self.R * AU**2) * np.exp(-self.R/self.Rc)
self.star = star
def Rout(self, thresh=None):
return max(self.R_edge)
@property
def Sigma(self):
return self._Sigma
@property
def Sigma_G(self):
return self._Sigma
def main():
Sigma_dot_plot()
Test_Removal()
def Test_Removal():
"""Removes gas fom a power law disc in regular timesteps without viscous evolution etc"""
star1 = PhotoStar(LX=1e30, M=1.0, R=2.5, T_eff=4000)
R = np.linspace(0.1,200,2000)
disc1 = DummyDisc(R, star1, RC=10)
internal_photo = XrayDiscPicogna(disc1)
plt.figure()
for t in np.linspace(0,2e3,6):
internal_photo(disc1, 2e3)
plt.loglog(0.5*(R[1:]+R[:-1]), disc1.Sigma, label='{}'.format(t))
plt.xlabel("R / AU")
plt.ylabel("$\Sigma_G~/~\mathrm{g~cm^{-2}}$")
plt.legend(title='Time / yr')
plt.show()
def Sigma_dot_plot():
"""Plot a comparison of the mass loss rate prescriptions"""
from control_scripts import run_model
# Set up dummy model
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m", type=str, default=DefaultModel)
args = parser.parse_args()
model = json.load(open(args.model, 'r'))
plt.figure(figsize=(6,6))
starX = PhotoStar(LX=1e30, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff'])
starE = PhotoStar(Phi=1e42, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff'])
disc = run_model.setup_disc(model)
R = disc.R
# Calculate EUV rates
disc._star = starE
internal_photo_E = EUVDiscAlexander(disc)
Sigma_dot_E = internal_photo_E.dSigmadt
photoevaporating_E = (Sigma_dot_E>0)
t_w_E = disc.Sigma[photoevaporating_E] / Sigma_dot_E[photoevaporating_E]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_E)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_E][np.argmin(t_w_E)]))
plt.loglog(R, Sigma_dot_E, label='EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--')
# Calculate X-ray rates
disc._star = starX
internal_photo_X = XrayDiscOwen(disc)
Sigma_dot_X = internal_photo_X.dSigmadt
photoevaporating_X = (Sigma_dot_X>0)
t_w_X = disc.Sigma[photoevaporating_X] / Sigma_dot_X[photoevaporating_X]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_X][np.argmin(t_w_X)]))
plt.loglog(R, Sigma_dot_X, label='X-ray (OEC12), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
# Calculate X-ray rates
disc._star = starX
internal_photo_X2 = XrayDiscPicogna(disc)
Sigma_dot_X2 = internal_photo_X2.dSigmadt
photoevaporating_X2 = (Sigma_dot_X2>0)
t_w_X2 = disc.Sigma[photoevaporating_X2] / Sigma_dot_X2[photoevaporating_X2]
print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X2)]))
print("Time minimum at R = {} AU".format(R[photoevaporating_X2][np.argmin(t_w_X2)]))
plt.loglog(R, Sigma_dot_X2, label='X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
# Plot mass loss rates
plt.xlabel("R / AU")
plt.ylabel("$\dot{\Sigma}_{\\rm w}$ / g cm$^{-2}$ yr$^{-1}$")
plt.xlim([0.1,1000])
plt.ylim([1e-8,1e-2])
plt.legend()
plt.show()
# Plot depletion time
plt.figure(figsize=(6,6))
plt.loglog(R[photoevaporating_E], t_w_E, label='EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--')
plt.loglog(R[photoevaporating_X], t_w_X, label='X-ray (OEC12), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
plt.loglog(R[photoevaporating_X2], t_w_X2, label='X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30))
plt.xlabel("R / AU")
plt.ylabel("$t_w / \mathrm{yr}$")
plt.xlim([0.1,1000])
plt.ylim([1e4,1e12])
plt.legend()
plt.show()
if __name__ == "__main__":
# Set extra things
DefaultModel = "../control_scripts/DiscConfig_default.json"
plt.rcParams['text.usetex'] = "True"
plt.rcParams['font.family'] = "serif"
main()
| gpl-3.0 | 6,671,760,707,096,831,000 | 39.082895 | 218 | 0.549782 | false |
puttarajubr/commcare-hq | corehq/apps/hqadmin/system_info/checks.py | 2 | 3064 | from django.core import cache
from django.conf import settings
from django.utils.safestring import mark_safe
from restkit import Resource
import json
from corehq.apps.hqadmin.system_info.utils import human_bytes
from soil import heartbeat
def check_redis():
#redis status
ret = {}
redis_status = ""
redis_results = ""
if 'redis' in settings.CACHES:
rc = cache.get_cache('redis')
try:
import redis
redis_api = redis.StrictRedis.from_url('redis://%s' % rc._server)
info_dict = redis_api.info()
redis_status = "Online"
redis_results = "Used Memory: %s" % info_dict['used_memory_human']
except Exception, ex:
redis_status = "Offline"
redis_results = "Redis connection error: %s" % ex
else:
redis_status = "Not Configured"
redis_results = "Redis is not configured on this system!"
ret['redis_status'] = redis_status
ret['redis_results'] = redis_results
return ret
def check_rabbitmq():
    ret = {}
mq_status = "Unknown"
if settings.BROKER_URL.startswith('amqp'):
amqp_parts = settings.BROKER_URL.replace('amqp://','').split('/')
mq_management_url = amqp_parts[0].replace('5672', '15672')
vhost = amqp_parts[1]
try:
mq = Resource('http://%s' % mq_management_url, timeout=2)
vhost_dict = json.loads(mq.get('api/vhosts', timeout=2).body_string())
mq_status = "Offline"
for d in vhost_dict:
if d['name'] == vhost:
mq_status='RabbitMQ OK'
except Exception, ex:
mq_status = "RabbitMQ Error: %s" % ex
else:
mq_status = "RabbitMQ Not configured"
ret['rabbitmq_status'] = mq_status
return ret
def check_celery_health():
ret = {}
celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
worker_status = ""
if celery_monitoring:
cresource = Resource(celery_monitoring, timeout=3)
all_workers = {}
try:
t = cresource.get("api/workers").body_string()
all_workers = json.loads(t)
except Exception, ex:
pass
worker_ok = '<span class="label label-success">OK</span>'
worker_bad = '<span class="label label-important">Down</span>'
tasks_ok = 'label-success'
tasks_full = 'label-warning'
worker_info = []
for hostname, w in all_workers.items():
status_html = mark_safe(worker_ok if w['status'] else worker_bad)
tasks_class = tasks_full if w['running_tasks'] == w['concurrency'] else tasks_ok
tasks_html = mark_safe('<span class="label %s">%d / %d</span> :: %d' % (tasks_class, w['running_tasks'], w['concurrency'], w['completed_tasks']))
worker_info.append(' '.join([hostname, status_html, tasks_html]))
worker_status = '<br>'.join(worker_info)
ret['worker_status'] = mark_safe(worker_status)
ret['heartbeat'] = heartbeat.is_alive()
return ret
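# Editor's addition: a hedged sketch (not in the original module) of how these checks
# are typically aggregated into one status dict; assumes Django settings are loaded
# so each check can run.
def example_all_checks():
    status = {}
    for check in (check_redis, check_rabbitmq, check_celery_health):
        status.update(check())
    return status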
| bsd-3-clause | 6,678,510,842,359,630,000 | 34.627907 | 157 | 0.587467 | false |
phalax4/CarnotKE | jyhton/lib-python/2.7/test/test_abc.py | 119 | 7715 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for abc.py."""
import unittest, weakref
from test import test_support
import abc
from inspect import isabstract
class TestABC(unittest.TestCase):
def test_abstractmethod_basics(self):
@abc.abstractmethod
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
def test_abstractproperty_basics(self):
@abc.abstractproperty
def foo(self): pass
self.assertTrue(foo.__isabstractmethod__)
def bar(self): pass
self.assertFalse(hasattr(bar, "__isabstractmethod__"))
class C:
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def foo(self): return 3
class D(C):
@property
def foo(self): return super(D, self).foo
self.assertEqual(D().foo, 3)
def test_abstractmethod_integration(self):
for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
class C:
__metaclass__ = abc.ABCMeta
@abstractthing
def foo(self): pass # abstract
def bar(self): pass # concrete
self.assertEqual(C.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, C) # because foo is abstract
self.assertTrue(isabstract(C))
class D(C):
def bar(self): pass # concrete override of concrete
self.assertEqual(D.__abstractmethods__, set(["foo"]))
self.assertRaises(TypeError, D) # because foo is still abstract
self.assertTrue(isabstract(D))
class E(D):
def foo(self): pass
self.assertEqual(E.__abstractmethods__, set())
E() # now foo is concrete, too
self.assertFalse(isabstract(E))
class F(E):
@abstractthing
def bar(self): pass # abstract override of concrete
self.assertEqual(F.__abstractmethods__, set(["bar"]))
self.assertRaises(TypeError, F) # because bar is abstract now
self.assertTrue(isabstract(F))
def test_subclass_oldstyle_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(issubclass(OldstyleClass, A))
self.assertFalse(issubclass(A, OldstyleClass))
def test_isinstance_class(self):
class A:
__metaclass__ = abc.ABCMeta
class OldstyleClass:
pass
self.assertFalse(isinstance(OldstyleClass, A))
self.assertTrue(isinstance(OldstyleClass, type(OldstyleClass)))
self.assertFalse(isinstance(A, OldstyleClass))
# This raises a recursion depth error, but is low-priority:
# self.assertTrue(isinstance(A, abc.ABCMeta))
def test_registration_basics(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
self.assertNotIsInstance(b, A)
self.assertNotIsInstance(b, (A,))
A.register(B)
self.assertTrue(issubclass(B, A))
self.assertTrue(issubclass(B, (A,)))
self.assertIsInstance(b, A)
self.assertIsInstance(b, (A,))
class C(B):
pass
c = C()
self.assertTrue(issubclass(C, A))
self.assertTrue(issubclass(C, (A,)))
self.assertIsInstance(c, A)
self.assertIsInstance(c, (A,))
def test_isinstance_invalidation(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(isinstance(b, A))
self.assertFalse(isinstance(b, (A,)))
A.register(B)
self.assertTrue(isinstance(b, A))
self.assertTrue(isinstance(b, (A,)))
def test_registration_builtins(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(int)
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
self.assertTrue(issubclass(int, A))
self.assertTrue(issubclass(int, (A,)))
class B(A):
pass
B.register(basestring)
self.assertIsInstance("", A)
self.assertIsInstance("", (A,))
self.assertTrue(issubclass(str, A))
self.assertTrue(issubclass(str, (A,)))
def test_registration_edge_cases(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(A) # should pass silently
class A1(A):
pass
self.assertRaises(RuntimeError, A1.register, A) # cycles not allowed
class B(object):
pass
A1.register(B) # ok
A1.register(B) # should pass silently
class C(A):
pass
A.register(C) # should pass silently
self.assertRaises(RuntimeError, C.register, A) # cycles not allowed
C.register(B) # ok
def test_register_non_class(self):
class A(object):
__metaclass__ = abc.ABCMeta
self.assertRaisesRegexp(TypeError, "Can only register classes",
A.register, 4)
def test_registration_transitiveness(self):
class A:
__metaclass__ = abc.ABCMeta
self.assertTrue(issubclass(A, A))
self.assertTrue(issubclass(A, (A,)))
class B:
__metaclass__ = abc.ABCMeta
self.assertFalse(issubclass(A, B))
self.assertFalse(issubclass(A, (B,)))
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
class C:
__metaclass__ = abc.ABCMeta
A.register(B)
class B1(B):
pass
self.assertTrue(issubclass(B1, A))
self.assertTrue(issubclass(B1, (A,)))
class C1(C):
pass
B1.register(C1)
self.assertFalse(issubclass(C, B))
self.assertFalse(issubclass(C, (B,)))
self.assertFalse(issubclass(C, B1))
self.assertFalse(issubclass(C, (B1,)))
self.assertTrue(issubclass(C1, A))
self.assertTrue(issubclass(C1, (A,)))
self.assertTrue(issubclass(C1, B))
self.assertTrue(issubclass(C1, (B,)))
self.assertTrue(issubclass(C1, B1))
self.assertTrue(issubclass(C1, (B1,)))
C1.register(int)
class MyInt(int):
pass
self.assertTrue(issubclass(MyInt, A))
self.assertTrue(issubclass(MyInt, (A,)))
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
def test_all_new_methods_are_called(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
counter = 0
def __new__(cls):
B.counter += 1
return super(B, cls).__new__(cls)
class C(A, B):
pass
self.assertEqual(B.counter, 0)
C()
self.assertEqual(B.counter, 1)
def test_cache_leak(self):
# See issue #2521.
class A(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def f(self):
pass
class C(A):
def f(self):
A.f(self)
r = weakref.ref(C)
# Trigger cache.
C().f()
del C
test_support.gc_collect()
self.assertEqual(r(), None)
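# Editor's note (not in the original test): ABCMeta used to cache subclass checks
# with strong references (see issue #2521), which kept otherwise-dead classes alive.
# The test above triggers the cache, drops the last reference to C, forces a
# collection, and asserts the weakref has cleared -- i.e. the cache no longer leaks.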
def test_main():
test_support.run_unittest(TestABC)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 838,110,080,379,842,700 | 32.111588 | 77 | 0.558782 | false |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.127/roles/lib_openshift/library/oc_secret.py | 12 | 57672 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_secret
short_description: Module to manage openshift secrets
description:
- Manage openshift secrets programmatically.
options:
state:
description:
- If present, the secret will be created if it doesn't exist or updated if different. If absent, the secret will be removed if present. If list, information about the secret will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
files:
description:
- A list of files provided for secrets
required: false
default: None
aliases: []
delete_after:
description:
- Whether or not to delete the files after processing them.
required: false
default: false
aliases: []
contents:
description:
- Content of the secrets
required: false
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: false
aliases: []
decode:
description:
- base64 decode the object
required: false
default: false
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create secret
oc_secret:
state: present
namespace: openshift-infra
name: metrics-deployer
files:
- name: nothing
path: /dev/null
register: secretout
run_once: true
- name: get ca from hawkular
oc_secret:
state: list
namespace: openshift-infra
name: hawkular-metrics-certificate
decode: True
register: hawkout
run_once: true
- name: Create secrets
oc_secret:
namespace: mynamespace
name: mysecrets
contents:
- path: data.yml
data: "{{ data_content }}"
- path: auth-keys
data: "{{ auth_keys_content }}"
- path: configdata.yml
data: "{{ configdata_content }}"
- path: cert.crt
data: "{{ cert_content }}"
- path: key.pem
data: "{{ osso_site_key_content }}"
- path: ca.cert.pem
data: "{{ ca_cert_content }}"
register: secretout
'''
# -*- -*- -*- End included fragment: doc/secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
% (inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# If vtype is not str then go ahead and attempt to yaml load it.
if isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming ' +
'value. value=[%s] vtype=[%s]'
% (type(inc_value), vtype))
return inc_value
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
            return {'failed': True,
                    'msg': ('Error opening file [%s]. Verify that the '
                            'file exists, that it has correct '
                            'permissions, and is valid yaml.'
                            % module.params['src'])}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
        return {'failed': True, 'msg': 'Unknown state passed'}
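# Editor's addition: a hedged, runnable sketch (not part of the original fragment)
# showing Yedit used directly rather than through the Ansible entry point; the path
# is a hypothetical example.
def _yedit_example(path='/tmp/example.yml'):
    yed = Yedit(filename=path, separator='.')
    yed.put('metadata.labels.app', 'demo')   # intermediate dicts are created as needed
    return yed.write()                       # (True, resulting_dict) on success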
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, rname, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource, rname]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
elif rname:
cmd.append(rname)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        # keep the exception under its own name so the
                        # outer ``err`` flag is not clobbered by the
                        # except clause
                        if "No JSON object could be decoded" in verr.args:
                            err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
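        # Illustrative return shape (hypothetical values) for a successful
        # call with output=True and output_type='json':
        #   {'returncode': 0, 'results': {...decoded JSON...},
        #    'cmd': '/usr/bin/oc get pods -o json'}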
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
        # By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            if debug:
                                print('list compare returned false')
                            return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
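        # Worked example (hypothetical definitions): 'metadata' and 'status'
        # are always skipped, so the extra resourceVersion below does not
        # break equality:
        #   user_def   = {'apiVersion': 'v1', 'data': {'a': 'MQ=='}}
        #   result_def = {'apiVersion': 'v1', 'data': {'a': 'MQ=='},
        #                 'metadata': {'resourceVersion': '42'}}
        #   Utils.check_def_equal(user_def, result_def)  # -> True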
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
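        # Illustrative sketch (hypothetical option shapes):
        #   {'grace_period': {'value': 30, 'include': True},
        #    'dry_run':      {'value': '', 'include': True}}
        # stringify() -> ['--grace-period=30']; falsy non-int values drop out.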
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
@secrets.setter
def secrets(self):
'''secret property setter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_secret.py -*- -*- -*-
# pylint: disable=wrong-import-position,wrong-import-order
import base64
# pylint: disable=too-many-arguments
class OCSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
secret_name=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
self.decode = decode
def get(self):
'''return a secret by name '''
results = self._get('secrets', self.name)
results['decoded'] = {}
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
results['returncode'] = 0
return results
def delete(self):
'''delete a secret by name'''
return self._delete('secrets', self.name)
def create(self, files=None, contents=None):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
return results
def update(self, files, force=False):
'''run update secret
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
secret = self.prep_secret(files)
if secret['returncode'] != 0:
return secret
sfile_path = '/tmp/%s' % self.name
with open(sfile_path, 'w') as sfd:
sfd.write(json.dumps(secret['results']))
atexit.register(Utils.cleanup, [sfile_path])
return self._replace(sfile_path, force=force)
def prep_secret(self, files=None, contents=None):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocsecret = OCSecret(params['namespace'],
params['name'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = ocsecret.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, state: 'list'}
if not params['name']:
return {'failed': True,
'msg': 'Please specify a name when state is absent|present.'}
########
# Delete
########
if state == 'absent':
if not Utils.exists(api_rval['results'], params['name']):
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = ocsecret.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
if state == 'present':
if params['files']:
files = params['files']
elif params['contents']:
files = Utils.create_tmp_files_from_contents(params['contents'])
else:
return {'failed': True,
'msg': 'Either specify files or contents.'}
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True,
'msg': 'Would have performed a create.'}
api_rval = ocsecret.create(files, params['contents'])
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
########
# Update
########
secret = ocsecret.prep_secret(params['files'], params['contents'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
return {'changed': False,
'results': secret['results'],
'state': 'present'}
if check_mode:
return {'changed': True,
'msg': 'Would have performed an update.'}
api_rval = ocsecret.update(files, force=params['force'])
# Remove files
if secret and params['delete_after']:
Utils.cleanup([ftmp['path'] for ftmp in files])
if api_rval['returncode'] != 0:
return {'failed': True,
'msg': api_rval}
return {'changed': True,
'results': api_rval,
'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_secret.py -*- -*- -*-
def main():
'''
ansible oc module for managing OpenShift Secrets
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
force=dict(default=False, type='bool'),
decode=dict(default=False, type='bool'),
),
mutually_exclusive=[["contents", "files"]],
supports_check_mode=True,
)
rval = OCSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_secret.py -*- -*- -*-
| apache-2.0 | 6,445,151,070,839,982,000 | 32.08778 | 244 | 0.526131 | false |
CERNDocumentServer/invenio | modules/bibsword/lib/bibsword_webinterface.py | 1 | 15249 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibSword Web Interface.
"""
from invenio.access_control_engine import (
    acc_authorize_action
)
import invenio.bibsword_client as sword_client
from invenio.config import (
    CFG_SITE_LANG,
    CFG_SITE_URL
)
from invenio.messages import (
    gettext_set_language
)
from invenio.webinterface_handler import (
    wash_urlargd,
    WebInterfaceDirectory
)
from invenio.webpage import (
    page
)
from invenio.webuser import (
    getUid,
    page_not_authorized
)
__lastupdated__ = """$Date$"""
class WebInterfaceSwordClient(WebInterfaceDirectory):
"""Web interface for the BibSword client."""
_exports = [
"",
"servers",
"server_options",
"submissions",
"submission_options",
"submit",
"submit_step_1",
"submit_step_2",
"submit_step_3",
"submit_step_4",
]
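    # Each name in _exports is served as a URL segment beneath this
    # directory (e.g. "servers" -> /sword_client/servers, "" -> the index);
    # this follows the usual WebInterfaceDirectory dispatch convention --
    # an assumption about the mount point, not verified here.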
def submissions(self, req, form):
"""Web interface for the existing submissions."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client/",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
body = sword_client.perform_request_submissions(
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Submissions"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def submission_options(self, req, form):
"""Web interface for the options on the submissions."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"option": (str, ""),
"action": (str, "submit"),
"server_id": (int, 0),
"status_url": (str, ""),
"ln": (str, CFG_SITE_LANG),
})
if argd["option"] in ("update",):
option = argd["option"]
else:
option = ""
if argd["action"] in ("submit",):
action = argd["action"]
else:
action = ""
server_id = argd["server_id"]
status_url = argd["status_url"]
ln = argd["ln"]
(error, result) = sword_client.perform_request_submission_options(
option,
action,
server_id,
status_url,
ln
)
if error:
req.set_content_type("text/plain; charset=utf-8")
req.set_status("400")
req.send_http_header()
req.write("Error: {0}".format(error))
return
return result
def servers(self, req, form):
"""Web interface for the available servers."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"manage_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client/",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
body = sword_client.perform_request_servers(
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Servers"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def server_options(self, req, form):
"""Web interface for the options on the available servers."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"manage_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"option": (str, ""),
"action": (str, "submit"),
"server_id": (int, 0),
"sword_client_server_name": (str, ""),
"sword_client_server_engine": (str, ""),
"sword_client_server_username": (str, ""),
"sword_client_server_password": (str, ""),
"sword_client_server_email": (str, ""),
"sword_client_server_update_frequency": (str, ""),
"ln": (str, CFG_SITE_LANG),
})
if argd["option"] in ("add", "update", "modify", "delete"):
option = argd["option"]
else:
option = ""
if argd["action"] in ("prepare", "submit"):
action = argd["action"]
else:
action = ""
server_id = argd["server_id"]
server = (
argd["sword_client_server_name"],
argd["sword_client_server_engine"],
argd["sword_client_server_username"],
argd["sword_client_server_password"],
argd["sword_client_server_email"],
argd["sword_client_server_update_frequency"],
)
ln = argd["ln"]
(error, result) = sword_client.perform_request_server_options(
option,
action,
server_id,
server,
ln
)
if error:
req.set_content_type("text/plain; charset=utf-8")
req.set_status("400")
req.send_http_header()
req.write("Error: {0}".format(error))
return
return result
def submit(self, req, form):
"""Submit a record using SWORD."""
# Check if the user has rights to manage the Sword client
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"record_id": (int, 0),
"server_id": (int, 0),
"ln": (str, CFG_SITE_LANG),
})
# Get the user ID
uid = getUid(req)
# Set language for i18n text auto generation
ln = argd["ln"]
_ = gettext_set_language(ln)
record_id = argd["record_id"]
server_id = argd["server_id"]
body = sword_client.perform_submit(
uid,
record_id,
server_id,
ln
)
navtrail = """
> <a class="navtrail" href="%(CFG_SITE_URL)s/sword_client">%(label)s</a>
""" % {
'CFG_SITE_URL': CFG_SITE_URL,
'label': _("Sword Client"),
}
return page(
title=_("Submit"),
body=body,
navtrail=navtrail,
lastupdated=__lastupdated__,
req=req,
language=ln
)
def submit_step_1(self, req, form):
"""Process step 1 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ''),
'server_id': (int, 0),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
server_id = argd['server_id']
ln = argd['ln']
return sword_client.perform_submit_step_1(
sid,
server_id,
ln
)
def submit_step_2(self, req, form):
"""Process step 2 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ""),
'collection_url': (str, ""),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
collection_url = argd['collection_url']
ln = argd['ln']
return sword_client.perform_submit_step_2(
sid,
collection_url,
ln
)
def submit_step_3(self, req, form):
"""Process step 3 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
'sid': (str, ""),
'mandatory_category_url': (str, ""),
'optional_categories_urls': (list, []),
'ln': (str, CFG_SITE_LANG),
})
sid = argd['sid']
ln = argd['ln']
mandatory_category_url = argd['mandatory_category_url']
optional_categories_urls = argd['optional_categories_urls']
return sword_client.perform_submit_step_3(
sid,
mandatory_category_url,
optional_categories_urls,
ln
)
def submit_step_4(self, req, form):
"""Process step 4 in the submission workflow."""
# Check if the user has adequate rights to run the bibsword client
        # TODO: in a more advanced model, also check if the given user has
# rights to the current submission based on the user id and the
# submission id. It would get even more complicated if we
# introduced people that can approve specific submissions etc.
auth_code, auth_message = acc_authorize_action(
req,
"run_sword_client"
)
if auth_code != 0:
return page_not_authorized(
req=req,
referer="/sword_client",
text=auth_message,
navtrail=""
)
argd = wash_urlargd(form, {
"sid": (str, ""),
"rn": (str, ""),
"additional_rn": (list, []),
"title": (str, ""),
"author_fullname": (str, ""),
"author_email": (str, ""),
"author_affiliation": (str, ""),
"abstract": (str, ""),
"contributor_fullname": (list, []),
"contributor_email": (list, []),
"contributor_affiliation": (list, []),
"files": (list, []),
"ln": (str, CFG_SITE_LANG),
})
sid = argd["sid"]
rn = argd["rn"]
additional_rn = argd["additional_rn"]
title = argd["title"]
author_fullname = argd["author_fullname"]
author_email = argd["author_email"]
author_affiliation = argd["author_affiliation"]
abstract = argd["abstract"]
contributor_fullname = argd["contributor_fullname"]
contributor_email = argd["contributor_email"]
contributor_affiliation = argd["contributor_affiliation"]
files_indexes = argd["files"]
ln = argd["ln"]
return sword_client.perform_submit_step_4(
sid,
(
rn,
additional_rn,
title,
author_fullname,
author_email,
author_affiliation,
abstract,
contributor_fullname,
contributor_email,
contributor_affiliation,
files_indexes
),
ln
)
index = submissions
| gpl-2.0 | -6,689,792,295,317,898,000 | 28.9 | 77 | 0.506525 | false |
omnirom/android_external_chromium-org | chrome/browser/PRESUBMIT.py | 36 | 2691 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser code.
This script currently only checks HTML/CSS/JS files in resources/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
checked for here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
path = input_api.os_path
cwd = input_api.PresubmitLocalPath()
resources = path.join(cwd, 'resources')
webui = path.join(cwd, 'ui', 'webui')
affected_files = (f.AbsoluteLocalPath() for f in input_api.AffectedFiles())
would_affect_tests = (
path.join(cwd, 'PRESUBMIT.py'),
path.join(cwd, 'test_presubmit.py'),
path.join(cwd, 'web_dev_style', 'css_checker.py'),
path.join(cwd, 'web_dev_style', 'html_checker.py'),
path.join(cwd, 'web_dev_style', 'js_checker.py'),
)
if any(f for f in affected_files if f in would_affect_tests):
tests = [path.join(cwd, 'test_presubmit.py')]
results.extend(
input_api.canned_checks.RunUnitTests(input_api, output_api, tests))
import sys
old_path = sys.path
try:
sys.path = [cwd] + old_path
from web_dev_style import (resource_checker, css_checker, html_checker,
js_checker)
search_dirs = (resources, webui)
def _html_css_js_resource(p):
return p.endswith(('.html', '.css', '.js')) and p.startswith(search_dirs)
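  # Illustrative sketch (hypothetical paths), given the dirs above:
  #   <cwd>/resources/options/foo.js  -> True
  #   <cwd>/ui/webui/bar.html         -> True
  #   <cwd>/resources/images/logo.png -> False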
BLACKLIST = ['chrome/browser/resources/pdf/index.html',
'chrome/browser/resources/pdf/index.js']
def is_resource(maybe_resource):
return (maybe_resource.LocalPath() not in BLACKLIST and
_html_css_js_resource(maybe_resource.AbsoluteLocalPath()))
results.extend(resource_checker.ResourceChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(css_checker.CSSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(html_checker.HtmlChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
results.extend(js_checker.JSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
finally:
sys.path = old_path
return results
| bsd-3-clause | -8,704,589,916,678,059,000 | 34.88 | 79 | 0.689335 | false |
TarasRudnyk/scrapy | docs/utils/linkfix.py | 141 | 1764 | #!/usr/bin/python
"""
Linkfix - a companion to sphinx's linkcheck builder.
Uses the linkcheck's output file to fix links in docs.
Originally created for this issue:
https://github.com/scrapy/scrapy/issues/606
Author: dufferzafar
"""
import re
# Used for remembering the file (and its contents)
# so we don't have to open the same file again.
_filename = None
_contents = None
# A regex that matches standard linkcheck output lines
line_re = re.compile(ur'(.*)\:\d+\:\s\[(.*)\]\s(?:(.*)\sto\s(.*)|(.*))')
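# A line this regex is meant to match looks like (illustrative, not taken
# from a real linkcheck run):
#   docs/index.rst:123: [redirected permanently] http://old/ to http://new/
# group(1)=file, group(2)=error type, group(3)=old link, group(4)=new link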
# Read lines from the linkcheck output file
try:
with open("build/linkcheck/output.txt") as out:
output_lines = out.readlines()
except IOError:
print("linkcheck output not found; please run linkcheck first.")
exit(1)
# For every line, fix the respective file
for line in output_lines:
match = re.match(line_re, line)
if match:
newfilename = match.group(1)
errortype = match.group(2)
# Broken links can't be fixed and
# I am not sure what do with the local ones.
if errortype.lower() in ["broken", "local"]:
print("Not Fixed: " + line)
else:
# If this is a new file
if newfilename != _filename:
# Update the previous file
if _filename:
with open(_filename, "w") as _file:
_file.write(_contents)
_filename = newfilename
# Read the new file to memory
with open(_filename) as _file:
_contents = _file.read()
_contents = _contents.replace(match.group(3), match.group(4))
    else:
        # We don't understand what the current line means!
        print("Not Understood: " + line)
# Flush the last file touched by the loop; without this final write the
# fixes applied to it would never reach disk.
if _filename:
    with open(_filename, "w") as _file:
        _file.write(_contents)
| bsd-3-clause | -4,291,072,471,067,289,600 | 27 | 73 | 0.592404 | false |
MarsSnail/gyp_tools | test/rules/gyptest-default.py | 25 | 1660 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
expect = """\
Hello from program.c
Hello from function3.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file1.out', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.out2', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out2', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.out4', 'Hello from file1.in\n')
test.must_match('relocate/src/subdir2/file2.out4', 'Hello from file2.in\n')
test.must_match('relocate/src/subdir2/file1.copy', 'Hello from file1.in\n')
test.must_match('relocate/src/external/file1.external_rules.out',
'Hello from file1.in\n')
test.must_match('relocate/src/external/file2.external_rules.out',
'Hello from file2.in\n')
test.pass_test()
| bsd-3-clause | 8,107,704,772,019,640,000 | 27.135593 | 75 | 0.713253 | false |
patvarilly/DNACC | examples/competing_linkages/competing_linkages.py | 1 | 10390 | # Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# Python script to produce all the figures in Bortolo's competing
# interactions paper:
#
# B.M. Mognetti, M.E. Leunissen and D. Frenkel, Soft Matter 8, 2213 (2012),
# doi: 10.1039/c2sm06635a
import numpy as np
from math import sqrt
import subprocess
import operator
import dnacc
from dnacc.units import nm
# Set up basic system
plates = dnacc.PlatesMeanField()
L = 20 * nm
plates.set_tether_type_prototype(L=L, sigma=0.0)
ALPHA = plates.add_tether_type(plate='lower', sticky_end='alpha')
BETA = plates.add_tether_type(plate='lower', sticky_end='beta')
ALPHA_P = plates.add_tether_type(plate='upper', sticky_end='alphap')
BETA_P = plates.add_tether_type(plate='upper', sticky_end='betap')
# A few useful utility methods
def reset_plates(plates):
for t in plates.tether_types:
t['sigma'] = 0.0
t['L'] = L
plates.beta_DeltaG0.clear()
def set_competing_interactions(plates, beta_DeltaG0a, beta_DeltaG0b):
plates.beta_DeltaG0['alpha', 'alphap'] = beta_DeltaG0a
plates.beta_DeltaG0['alpha', 'betap'] = beta_DeltaG0b
plates.beta_DeltaG0['beta', 'alphap'] = beta_DeltaG0b
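# Illustrative usage (hypothetical binding energies): a strong
# alpha-alphap bond at -10 kT with weak competing cross terms at -2 kT:
#   set_competing_interactions(plates, -10.0, -2.0)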
# Sample interaction potentials every 0.05 * L
hArr = np.linspace(0.05 * L, 2.00 * L, 40)
# Figure 2
# ========
def figure2():
reset_plates(plates)
S = 0.75 * sqrt(2.0)
sigma = 1 / (S * L) ** 2
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = L
with open('fig2.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
for beta_DeltaDeltaG in (3, 5, 8, 11, 14):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0a in xrange(0, -51, -1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig2.gp'])
# Figure 3a
# =========
def figure3a():
single = dnacc.PlatesMeanField()
single.set_tether_type_prototype(L=L, sigma=0)
ALPHA = single.add_tether_type(plate='lower', sticky_end='alpha')
ALPHA_P = single.add_tether_type(plate='upper', sticky_end='alphap')
with open('fig3a.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N (MF)\t'
'n_alpha / N (SCMF)\n')
for S in (1.06, 0.75, 0.53):
f.write('# S = %.1f L_alpha\n' % S)
sigma = 1 / (S * L) ** 2
for t in single.tether_types:
t['sigma'] = sigma
for h in (1, 1.5):
f.write('# h = %.1f L_alpha\n' % h)
single.separation = h * L
for beta_DeltaG0a in xrange(0, -31, -1):
single.beta_DeltaG0['alpha', 'alphap'] = beta_DeltaG0a
single.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
                             # Mean Field (MF) [same SCMF value written in both columns]
single.sigma_bound[ALPHA, ALPHA_P] / sigma,
# Self-consistent mean field
single.sigma_bound[ALPHA, ALPHA_P] / sigma))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig3a.gp'])
# Figure 3b
# =========
def figure3b():
reset_plates(plates)
S = 0.75
sigma = 1 / (S * L) ** 2
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = L
with open('fig3b.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
beta_DeltaDeltaG = 8
for beta_DeltaG0a in xrange(0, -41, -1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
subprocess.call(['gnuplot', 'plot_fig3b.gp'])
# Figure 4a
# =========
def figure4a():
reset_plates(plates)
S = 0.75
ts = plates.tether_types
ts[ALPHA]['sigma'] = ts[ALPHA_P]['sigma'] = 0.3 / (S * L) ** 2
ts[BETA]['sigma'] = ts[BETA_P]['sigma'] = 0.7 / (S * L) ** 2
with open('fig4a.txt', 'w') as f:
f.write('# betaDeltaG0b (kT)\t' 'F_min (kT/L^2)\n')
for beta_DeltaDeltaG in (-1000, 8, 5, 3):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0b in xrange(-20, 9):
set_competing_interactions(plates,
beta_DeltaG0b - beta_DeltaDeltaG,
beta_DeltaG0b)
f.write('%g\t%g\n' %
(beta_DeltaG0b,
min((plates.at(h).free_energy_density
for h in hArr)) / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig4a.gp'])
# Figure 4b
# =========
def figure4b():
reset_plates(plates)
S = 0.75
ts = plates.tether_types
ts[ALPHA]['sigma'] = ts[ALPHA_P]['sigma'] = 0.3 / (S * L) ** 2
ts[BETA]['sigma'] = ts[BETA_P]['sigma'] = 0.7 / (S * L) ** 2
with open('fig4b.txt', 'w') as f:
f.write('# f (Fraction of hybridised linkages)\t'
'F_min (kT/L^2)\n')
for beta_DeltaDeltaG in (+1000, -1000, 8, 5, 3):
f.write("# beta_DeltaDeltaG = %g kT\n" % beta_DeltaDeltaG)
for beta_DeltaG0b in xrange(-24, 5):
beta_DeltaG0a = beta_DeltaG0b - beta_DeltaDeltaG
if beta_DeltaDeltaG == +1000:
set_competing_interactions(plates,
beta_DeltaG0b,
+1000)
else:
set_competing_interactions(plates,
beta_DeltaG0a,
beta_DeltaG0b)
hAtMin, minF = min(((h, plates.at(h).free_energy_density)
for h in hArr),
key=operator.itemgetter(1))
plates.at(hAtMin)
if beta_DeltaDeltaG == +1000:
maxFract = plates.tether_types[ALPHA]['sigma']
else:
maxFract = 2 * min(plates.tether_types[ALPHA]['sigma'],
plates.tether_types[BETA]['sigma'])
fract = sum(plates.sigma_bound[x]
for x in [(ALPHA, ALPHA_P), (ALPHA, BETA_P),
(BETA, ALPHA_P)]) / maxFract
f.write('%g\t%g\n' % (fract, minF / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig4b.gp'])
# Figure 5a
# =========
#
# Here, the mean field theory seems to work poorly due to the importance
# of coverage fluctuations
def figure5a():
reset_plates(plates)
S = 0.75
sigma = 0.5 / (S * L) ** 2
ts = plates.tether_types
ts[ALPHA]['L'] = ts[ALPHA_P]['L'] = 0.3 * L
ts[BETA]['L'] = ts[BETA_P]['L'] = 1.7 * L
for t in plates.tether_types:
t['sigma'] = sigma
plates.separation = 0.3 * L
with open('fig5a.txt', 'w') as f:
f.write('# betaDeltaG0a (kT)\t' 'n_alpha / N\t' 'n_beta / N\n')
for beta_DeltaDeltaG in (3, 5, 8, 11, 14):
f.write("# beta_DeltaDeltaG = %g\n" % beta_DeltaDeltaG)
for beta_DeltaG0a in xrange(-40, 1):
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
plates.update()
f.write('%g\t%g\t%g\n' %
(beta_DeltaG0a,
plates.sigma_bound[ALPHA, ALPHA_P] / (2 * sigma),
(plates.sigma_bound[ALPHA, BETA_P] +
plates.sigma_bound[BETA, ALPHA_P]) / (2 * sigma)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig5a.gp'])
# Figure 5b
# =========
def figure5b():
reset_plates(plates)
S = 0.75
sigma = 0.5 / (S * L) ** 2
ts = plates.tether_types
ts[ALPHA]['L'] = ts[ALPHA_P]['L'] = 0.3 * L
ts[BETA]['L'] = ts[BETA_P]['L'] = 1.7 * L
for t in plates.tether_types:
t['sigma'] = sigma
with open('fig5b.txt', 'w') as f:
f.write('# h/L (L = 20 nm)\t' 'F (kT / L^2)\n')
beta_DeltaDeltaG = 8
for beta_DeltaG0a in (-5.8, -8.7, -11.6, -14.5, -17.4, -20.3):
f.write("# beta_DeltaG0a = %g\n" % beta_DeltaG0a)
set_competing_interactions(plates, beta_DeltaG0a,
beta_DeltaG0a + beta_DeltaDeltaG)
VArr = [plates.at(h).free_energy_density for h in hArr]
for (h, V) in zip(hArr, VArr):
f.write('%g\t%g\n' % (h / L, V / (1 / L ** 2)))
f.write('\n\n')
subprocess.call(['gnuplot', 'plot_fig5b.gp'])
# Main module
figure2()
figure3a()
figure3b()
figure4a()
figure4b()
figure5a()
figure5b()
| gpl-3.0 | 7,039,810,188,648,784,000 | 31.984127 | 76 | 0.507988 | false |
sebrandon1/neutron | neutron/plugins/ml2/drivers/l2pop/mech_driver.py | 2 | 14284 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as const
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _, _LW
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.db import l3_hamode_db
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
LOG = logging.getLogger(__name__)
class L2populationMechanismDriver(api.MechanismDriver):
def __init__(self):
super(L2populationMechanismDriver, self).__init__()
self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()
def initialize(self):
LOG.debug("Experimental L2 population driver")
self.rpc_ctx = n_context.get_admin_context_without_session()
def _get_port_fdb_entries(self, port):
# the port might be concurrently deleted
if not port or not port.get('fixed_ips'):
return []
return [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip['ip_address'])
for ip in port['fixed_ips']]
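        # Illustrative sketch (hypothetical port): a port such as
        #   {'mac_address': 'fa:16:3e:aa:bb:cc',
        #    'fixed_ips': [{'ip_address': '10.0.0.5'}]}
        # yields [PortInfo(mac_address='fa:16:3e:aa:bb:cc',
        #                  ip_address='10.0.0.5')].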
def check_vlan_transparency(self, context):
"""L2population driver vlan transparency support."""
return True
def _get_ha_port_agents_fdb(
self, session, network_id, router_id):
other_fdb_ports = {}
for agent in l2pop_db.get_ha_agents_by_router_id(session, router_id):
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent.host, network_id)
if agent_active_ports == 0:
ip = l2pop_db.get_agent_ip(agent)
other_fdb_ports[ip] = [const.FLOODING_ENTRY]
return other_fdb_ports
def delete_port_postcommit(self, context):
port = context.current
agent_host = context.host
fdb_entries = self._get_agent_fdb(context.bottom_bound_segment,
port, agent_host)
if port['device_owner'] in l2pop_db.HA_ROUTER_PORTS and fdb_entries:
session = db_api.get_session()
network_id = port['network_id']
other_fdb_ports = self._get_ha_port_agents_fdb(
session, network_id, port['device_id'])
fdb_entries[network_id]['ports'] = other_fdb_ports
self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
fdb_entries)
def filter_hosts_with_segment_access(
self, context, segments, candidate_hosts, agent_getter):
# NOTE(cbrandily): let other mechanisms (openvswitch, linuxbridge, ...)
# perform the filtering
return set()
def _get_diff_ips(self, orig, port):
orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
# check if an ip has been added or removed
orig_chg_ips = orig_ips.difference(port_ips)
port_chg_ips = port_ips.difference(orig_ips)
if orig_chg_ips or port_chg_ips:
return orig_chg_ips, port_chg_ips
def _fixed_ips_changed(self, context, orig, port, diff_ips):
orig_ips, port_ips = diff_ips
if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
agent_host = context.host
else:
agent_host = context.original_host
if not agent_host:
return
agent_ip = l2pop_db.get_agent_ip_by_host(db_api.get_session(),
agent_host)
orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip)
for ip in orig_ips]
port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'],
ip_address=ip)
for ip in port_ips]
upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
ports = upd_fdb_entries[port['network_id']][agent_ip]
if orig_mac_ip:
ports['before'] = orig_mac_ip
if port_mac_ip:
ports['after'] = port_mac_ip
self.L2populationAgentNotify.update_fdb_entries(
self.rpc_ctx, {'chg_ip': upd_fdb_entries})
return True
def update_port_precommit(self, context):
port = context.current
orig = context.original
if (orig['mac_address'] != port['mac_address'] and
context.status == const.PORT_STATUS_ACTIVE):
msg = _("unable to modify mac_address of ACTIVE port "
"%s") % port['id']
raise exceptions.InvalidInput(error_message=msg)
def update_port_postcommit(self, context):
port = context.current
orig = context.original
if l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id']):
return
diff_ips = self._get_diff_ips(orig, port)
if diff_ips:
self._fixed_ips_changed(context, orig, port, diff_ips)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
if context.status == const.PORT_STATUS_ACTIVE:
self.update_port_up(context)
if context.status == const.PORT_STATUS_DOWN:
agent_host = context.host
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif (context.host != context.original_host
and context.original_status == const.PORT_STATUS_ACTIVE
and context.status == const.PORT_STATUS_DOWN):
# The port has been migrated. Send notification about port
# removal from old host.
fdb_entries = self._get_agent_fdb(
context.original_bottom_bound_segment,
orig, context.original_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif context.status != context.original_status:
if context.status == const.PORT_STATUS_ACTIVE:
self.update_port_up(context)
elif context.status == const.PORT_STATUS_DOWN:
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, context.host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
def _validate_segment(self, segment, port_id, agent):
if not segment:
LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound "
"to any segment", {'port': port_id, 'agent': agent})
return False
network_types = l2pop_db.get_agent_l2pop_network_types(agent)
if network_types is None:
network_types = l2pop_db.get_agent_tunnel_types(agent)
if segment['network_type'] not in network_types:
return False
return True
def _create_agent_fdb(self, session, agent, segment, network_id):
agent_fdb_entries = {network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {}}}
tunnel_network_ports = (
l2pop_db.get_distributed_active_network_ports(session, network_id))
fdb_network_ports = (
l2pop_db.get_nondistributed_active_network_ports(session,
network_id))
ports = agent_fdb_entries[network_id]['ports']
ports.update(self._get_tunnels(
fdb_network_ports + tunnel_network_ports,
agent.host))
for agent_ip, fdbs in ports.items():
for binding, agent in fdb_network_ports:
if l2pop_db.get_agent_ip(agent) == agent_ip:
fdbs.extend(self._get_port_fdb_entries(binding.port))
return agent_fdb_entries
def _get_tunnels(self, tunnel_network_ports, exclude_host):
agents = {}
for __, agent in tunnel_network_ports:
if agent.host == exclude_host:
continue
ip = l2pop_db.get_agent_ip(agent)
if not ip:
LOG.debug("Unable to retrieve the agent ip, check "
"the agent %s configuration.", agent.host)
continue
if ip not in agents:
agents[ip] = [const.FLOODING_ENTRY]
return agents
def update_port_down(self, context):
port = context.current
agent_host = context.host
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
# when agent transitions to backup, don't remove flood flows
if agent_host and l3plugin and getattr(
l3plugin, "list_router_ids_on_host", None):
admin_context = n_context.get_admin_context()
if l3plugin.list_router_ids_on_host(
admin_context, agent_host, [port['device_id']]):
return
fdb_entries = self._get_agent_fdb(
context.bottom_bound_segment, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
def update_port_up(self, context):
port = context.current
agent_host = context.host
session = db_api.get_session()
agent = l2pop_db.get_agent_by_host(session, agent_host)
if not agent:
LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
agent_host)
return
network_id = port['network_id']
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent_host, network_id)
agent_ip = l2pop_db.get_agent_ip(agent)
segment = context.bottom_bound_segment
if not self._validate_segment(segment, port['id'], agent):
return
other_fdb_entries = self._get_fdb_entries_template(
segment, agent_ip, network_id)
other_fdb_ports = other_fdb_entries[network_id]['ports']
if agent_active_ports == 1 or (l2pop_db.get_agent_uptime(agent) <
cfg.CONF.l2pop.agent_boot_time):
# First port activated on current agent in this network,
# we have to provide it with the whole list of fdb entries
agent_fdb_entries = self._create_agent_fdb(session,
agent,
segment,
network_id)
# And notify other agents to add flooding entry
other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY)
if agent_fdb_entries[network_id]['ports'].keys():
self.L2populationAgentNotify.add_fdb_entries(
self.rpc_ctx, agent_fdb_entries, agent_host)
# Notify other agents to add fdb rule for current port
if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
not l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id'])):
other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)
self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
other_fdb_entries)
def _get_agent_fdb(self, segment, port, agent_host):
if not agent_host:
return
network_id = port['network_id']
session = db_api.get_session()
agent_active_ports = l2pop_db.get_agent_network_active_port_count(
session, agent_host, network_id)
agent = l2pop_db.get_agent_by_host(db_api.get_session(), agent_host)
if not self._validate_segment(segment, port['id'], agent):
return
agent_ip = l2pop_db.get_agent_ip(agent)
other_fdb_entries = self._get_fdb_entries_template(
segment, agent_ip, port['network_id'])
if agent_active_ports == 0:
# Agent is removing its last activated port in this network,
# other agents needs to be notified to delete their flooding entry.
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
# Notify other agents to remove fdb rules for current port
if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
not l3_hamode_db.is_ha_router_port(port['device_owner'],
port['device_id'])):
fdb_entries = self._get_port_fdb_entries(port)
other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
return other_fdb_entries
@classmethod
def _get_fdb_entries_template(cls, segment, agent_ip, network_id):
return {
network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {agent_ip: []}}}
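        # Illustrative output (hypothetical values):
        #   {'net-id': {'segment_id': 101,
        #               'network_type': 'vxlan',
        #               'ports': {'192.0.2.10': []}}}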
| apache-2.0 | 5,595,001,444,596,550,000 | 41.260355 | 79 | 0.577989 | false |
arenadata/ambari | ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0/package/scripts/interpreter_json_template.py | 3 | 10516 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
template = '''
{
"interpreterSettings": {
"2CKEKWY8Z": {
"id": "2CKEKWY8Z",
"name": "angular",
"group": "angular",
"properties": {},
"status": "READY",
"interpreterGroup": [
{
"name": "angular",
"class": "org.apache.zeppelin.angular.AngularInterpreter",
"defaultInterpreter": false,
"editor": {
"editOnDblClick": true
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX8WPU1": {
"id": "2CKX8WPU1",
"name": "spark",
"group": "spark",
"properties": {
"spark.executor.memory": "512m",
"args": "",
"zeppelin.spark.printREPLOutput": "true",
"spark.cores.max": "",
"zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",
"zeppelin.spark.sql.stacktrace": "false",
"zeppelin.spark.importImplicit": "true",
"zeppelin.spark.concurrentSQL": "false",
"zeppelin.spark.useHiveContext": "true",
"zeppelin.pyspark.python": "python",
"zeppelin.dep.localrepo": "local-repo",
"zeppelin.R.knitr": "true",
"zeppelin.spark.maxResult": "1000",
"master": "yarn-client",
"spark.app.name": "Zeppelin",
"zeppelin.R.image.width": "100%",
"zeppelin.R.render.options": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F",
"zeppelin.R.cmd": "R"
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.spark.SparkInterpreter",
"defaultInterpreter": true,
"editor": {
"language": "scala"
}
},
{
"name": "sql",
"class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql"
}
},
{
"name": "dep",
"class": "org.apache.zeppelin.spark.DepInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "scala"
}
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.spark.PySparkInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "python"
}
},
{
"name": "r",
"class": "org.apache.zeppelin.spark.SparkRInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "r"
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CK8A9MEG": {
"id": "2CK8A9MEG",
"name": "jdbc",
"group": "jdbc",
"properties": {
"default.password": "",
"zeppelin.jdbc.auth.type": "",
"common.max_count": "1000",
"zeppelin.jdbc.principal": "",
"default.user": "gpadmin",
"default.url": "jdbc:postgresql://localhost:5432/",
"default.driver": "org.postgresql.Driver",
"zeppelin.jdbc.keytab.location": "",
"zeppelin.jdbc.concurrent.use": "true",
"zeppelin.jdbc.concurrent.max_connection": "10"
},
"status": "READY",
"interpreterGroup": [
{
"name": "sql",
"class": "org.apache.zeppelin.jdbc.JDBCInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX6DGQZ": {
"id": "2CKX6DGQZ",
"name": "livy",
"group": "livy",
"properties": {
"zeppelin.livy.pull_status.interval.millis": "1000",
"livy.spark.executor.memory": "",
"zeppelin.livy.session.create_timeout": "120",
"zeppelin.livy.principal": "",
"zeppelin.livy.spark.sql.maxResult": "1000",
"zeppelin.livy.keytab": "",
"zeppelin.livy.concurrentSQL": "false",
"zeppelin.livy.spark.sql.field.truncate": "true",
"livy.spark.executor.cores": "",
"zeppelin.livy.displayAppInfo": "false",
"zeppelin.livy.url": "http://localhost:8998",
"livy.spark.dynamicAllocation.minExecutors": "",
"livy.spark.driver.cores": "",
"livy.spark.jars.packages": "",
"livy.spark.dynamicAllocation.enabled": "",
"livy.spark.executor.instances": "",
"livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "",
"livy.spark.dynamicAllocation.maxExecutors": "",
"livy.spark.dynamicAllocation.initialExecutors": "",
"livy.spark.driver.memory": ""
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.livy.LivySparkInterpreter",
"defaultInterpreter": true,
"editor": {
"language": "scala",
"editOnDblClick": false
}
},
{
"name": "sql",
"class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql",
"editOnDblClick": false
}
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.livy.LivyPySparkInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "python",
"editOnDblClick": false
}
},
{
"name": "pyspark3",
"class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter",
"defaultInterpreter": false,
"editor": {
"language": "python",
"editOnDblClick": false
}
},
{
"name": "sparkr",
"class": "org.apache.zeppelin.livy.LivySparkRInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "r",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "scoped",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKAY1A8Y": {
"id": "2CKAY1A8Y",
"name": "md",
"group": "md",
"properties": {
"markdown.parser.type": "pegdown"
},
"status": "READY",
"interpreterGroup": [
{
"name": "md",
"class": "org.apache.zeppelin.markdown.Markdown",
"defaultInterpreter": false,
"editor": {
"language": "markdown",
"editOnDblClick": true
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CHS8UYQQ": {
"id": "2CHS8UYQQ",
"name": "sh",
"group": "sh",
"properties": {
"zeppelin.shell.keytab.location": "",
"shell.command.timeout.millisecs": "60000",
"zeppelin.shell.principal": "",
"zeppelin.shell.auth.type": ""
},
"status": "READY",
"interpreterGroup": [
{
"name": "sh",
"class": "org.apache.zeppelin.shell.ShellInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sh",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
}
},
"interpreterBindings": {},
"interpreterRepositories": [
{
"id": "central",
"type": "default",
"url": "http://repo1.maven.org/maven2/",
"releasePolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"snapshotPolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"mirroredRepositories": [],
"repositoryManager": false
},
{
"id": "local",
"type": "default",
"url": "file:///home/zeppelin/.m2/repository",
"releasePolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"snapshotPolicy": {
"enabled": true,
"updatePolicy": "daily",
"checksumPolicy": "warn"
},
"mirroredRepositories": [],
"repositoryManager": false
}
]
}
'''
| apache-2.0 | -9,195,726,142,445,262,000 | 28.130194 | 181 | 0.509985 | false |
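The row above embeds Zeppelin's interpreter-settings JSON as a Python string named `template`. As a hedged sketch — the consumption code below is an editor illustration, not part of the original repository — the payload can be parsed and inspected with the standard json module:

import json

settings = json.loads(template)
for interp_id, interp in settings["interpreterSettings"].items():
    # Each interpreter entry names its group and the classes it exposes.
    classes = [group["class"] for group in interp["interpreterGroup"]]
    print("%s (%s): %d interpreter class(es)" % (interp["name"], interp_id, len(classes)))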
EnviroCentre/jython-upgrade | jython/lib/test/test_sha.py | 136 | 1703 | # Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import warnings
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
DeprecationWarning)
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
# Check digest matches the expected value
obj = sha.new(data)
computed = obj.hexdigest()
self.assertTrue(computed == digest)
# Verify that the value doesn't change between two consecutive
# digest operations.
computed_again = obj.hexdigest()
self.assertTrue(computed == computed_again)
# Check hexdigest() output matches digest()'s output
digest = obj.digest()
hexd = ""
for c in digest:
hexd += '%02x' % ord(c)
self.assertTrue(computed == hexd)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_case_4(self):
self.check(chr(0xAA) * 80,
'4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
| mit | 587,045,028,039,699,600 | 29.410714 | 78 | 0.642983 | false |
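The test row above exercises Python 2's deprecated sha module. As an assumed modern equivalent — hashlib is the stated replacement, and this snippet is an editor illustration rather than part of the original file — the same FIPS 180-1 digests can be checked like so:

import hashlib

def sha1_hex(data):
    # hashlib.sha1 replaces sha.new for SHA-1 digests.
    return hashlib.sha1(data).hexdigest()

assert sha1_hex(b"abc") == "a9993e364706816aba3e25717850c26c9cd0d89d"
assert sha1_hex(b"a" * 1000000) == "34aa973cd4c4daa4f61eeb2bdbad27316534016f"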
Hawaii-Smart-Energy-Project/Maui-Smart-Grid | src/filelock.py | 1 | 4303 | # Copyright (c) 2009, Evan Fosmark
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
import os
import time
import errno
class FileLockException(Exception):
pass
class FileLock(object):
""" A file locking mechanism that has context-manager support so
    you can use it in a with statement. It should be relatively
    cross-platform, as it doesn't rely on msvcrt or fcntl for the locking.
"""
__slots__ = ('fd', 'is_locked', 'lockfile', 'file_name', 'timeout', 'delay')
def __init__(self, file_name, timeout = 10, delay = .05):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = os.path.abspath(
os.path.expanduser(os.path.expandvars("%s.lock" % file_name)))
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
""" Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
pid = os.getpid()
while True:
try:
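                # os.O_CREAT | os.O_EXCL makes the open atomic: it fails with
                # EEXIST when another process already created the lockfile.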
self.fd = os.open(self.lockfile,
os.O_CREAT | os.O_EXCL | os.O_RDWR)
os.write(self.fd, "%d" % pid)
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
                    raise FileLockException("Timeout occurred.")
time.sleep(self.delay)
self.is_locked = True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
        It automatically releases the lock if it is still held.
"""
if self.is_locked:
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
| bsd-3-clause | -4,454,241,437,241,890,000 | 37.079646 | 80 | 0.638624 | false |
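A minimal usage sketch for the FileLock class above; the file name and payload are illustrative assumptions, not taken from the project:

if __name__ == "__main__":
    # Acquire on entry, release on exit -- even if the body raises.
    with FileLock("shared_resource.txt", timeout=5, delay=0.1):
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")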