repo_name (string, length 5-100) | path (string, length 4-299) | copies (990 classes) | size (string, length 4-7) | content (string, length 666-1.03M) | license (15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
ldgarcia/django-allauth | allauth/socialaccount/providers/vk/provider.py | 65 | 1616 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount import app_settings
class VKAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('link')
def get_avatar_url(self):
ret = None
photo_big_url = self.account.extra_data.get('photo_big')
photo_medium_url = self.account.extra_data.get('photo_medium')
if photo_big_url:
return photo_big_url
elif photo_medium_url:
return photo_medium_url
else:
return ret
def to_str(self):
first_name = self.account.extra_data.get('first_name', '')
last_name = self.account.extra_data.get('last_name', '')
name = ' '.join([first_name, last_name]).strip()
return name or super(VKAccount, self).to_str()
class VKProvider(OAuth2Provider):
id = 'vk'
name = 'VK'
package = 'allauth.socialaccount.providers.vk'
account_class = VKAccount
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('email')
return scope
def extract_uid(self, data):
return str(data['uid'])
def extract_common_fields(self, data):
return dict(email=data.get('email'),
last_name=data.get('last_name'),
username=data.get('screen_name'),
first_name=data.get('first_name'))
providers.registry.register(VKProvider)
| mit | 225,250,552,841,582,700 | 30.686275 | 74 | 0.62995 | false |
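A hedged illustration of the extractor methods defined in the provider above. The payload below is an invented VK API response (every value is made up); only the keys mirror what the code actually reads.

data = {
    'uid': 210700286,
    'first_name': 'Lindsey',
    'last_name': 'Stirling',
    'screen_name': 'lindseystirling',
    'email': 'lindsey@example.com',
}
# extract_uid(data)           -> '210700286'
# extract_common_fields(data) -> {'email': 'lindsey@example.com',
#                                 'last_name': 'Stirling',
#                                 'username': 'lindseystirling',
#                                 'first_name': 'Lindsey'}
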
adviti/melange | thirdparty/google_appengine/lib/django_0_96/django/utils/feedgenerator.py | 32 | 11349 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> feed = feedgenerator.Rss201rev2Feed(
... title=u"Poynter E-Media Tidbits",
... link=u"http://www.poynter.org/column.asp?id=31",
... description=u"A group weblog by the sharpest minds in online media/journalism/publishing.",
... language=u"en",
... )
>>> feed.add_item(title="Hello", link=u"http://www.holovaty.com/test/", description="Testing.")
>>> fp = open('test.rss', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from django.utils.xmlutils import SimplerXMLGenerator
import datetime, re, time
import email.Utils
def rfc2822_date(date):
return email.Utils.formatdate(time.mktime(date.timetuple()))
def rfc3339_date(date):
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_tag_uri(url, date):
"Creates a TagURI. See http://diveintomark.org/archives/2004/05/28/howto-atom-id"
tag = re.sub('^http://', '', url)
if date is not None:
tag = re.sub('/', ',%s:/' % date.strftime('%Y-%m-%d'), tag, 1)
tag = re.sub('#', '/', tag)
return 'tag:' + tag
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None):
self.feed = {
'title': title,
'link': link,
'description': description,
'language': language,
'author_email': author_email,
'author_name': author_name,
'author_link': author_link,
'subtitle': subtitle,
'categories': categories or (),
'feed_url': feed_url,
'feed_copyright': feed_copyright,
}
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
self.items.append({
'title': title,
'link': link,
'description': description,
'author_email': author_email,
'author_name': author_name,
'author_link': author_link,
'pubdate': pubdate,
'comments': comments,
'unique_id': unique_id,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': item_copyright,
})
def num_items(self):
return len(self.items)
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
from StringIO import StringIO
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.url, self.length, self.mime_type = url, length, mime_type
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", {u"version": self._version})
handler.startElement(u"channel", {})
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def endChannelElement(self, handler):
handler.endElement(u"channel")
class RssUserland091Feed(RssFeed):
_version = u"0.91"
def write_items(self, handler):
for item in self.items:
handler.startElement(u"item", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
handler.endElement(u"item")
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def write_items(self, handler):
for item in self.items:
handler.startElement(u"item", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('ascii'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)
handler.endElement(u"item")
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
if self.feed['language'] is not None:
handler.startElement(u"feed", {u"xmlns": self.ns, u"xml:lang": self.feed['language']})
else:
handler.startElement(u"feed", {u"xmlns": self.ns})
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['link'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('ascii'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
self.write_items(handler)
handler.endElement(u"feed")
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", {})
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('ascii'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])
handler.endElement(u"entry")
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| apache-2.0 | -1,065,506,411,078,544,900 | 40.571429 | 99 | 0.588774 | false |
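The module docstring above demonstrates Rss201rev2Feed; here is a minimal sketch of the same flow for the Atom1Feed class the module also defines (Python 2 era, matching this Django 0.96 copy; all titles and URLs are invented):

import datetime
from django.utils import feedgenerator

feed = feedgenerator.Atom1Feed(
    title=u"Example feed",
    link=u"http://example.com/",
    description=u"Latest entries.",
    language=u"en",
)
feed.add_item(
    title=u"Hello",
    link=u"http://example.com/hello/",
    description=u"Testing.",
    pubdate=datetime.datetime(2007, 1, 1, 12, 0),
)
xml = feed.writeString('utf-8')   # '<feed xmlns="http://www.w3.org/2005/Atom" ...'

Supplying pubdate on each item keeps the feed-level <updated> element deterministic, since Atom1Feed derives it from latest_post_date().
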
chidea/GoPythonDLLWrapper | bin/lib/wsgiref/util.py | 119 | 5634 | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def __next__(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib.parse import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/', encoding='latin1')
return url
def request_uri(environ, include_query=True):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib.parse import quote
path_info = quote(environ.get('PATH_INFO',''), safe='/;=,', encoding='latin1')
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p != '.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from io import StringIO, BytesIO
environ.setdefault('wsgi.input', BytesIO())
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.__contains__
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
| mit | -5,228,280,960,462,081,000 | 33.145455 | 82 | 0.634363 | false |
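A small usage sketch of the helpers that carry most of the behaviour documented in the docstrings above; the environ values are invented, the function names are the real ones from wsgiref.util:

from wsgiref.util import setup_testing_defaults, shift_path_info, request_uri

environ = {'SCRIPT_NAME': '', 'PATH_INFO': '/app/users/42'}
setup_testing_defaults(environ)        # fills in HTTP_HOST, SERVER_*, wsgi.* keys

print(request_uri(environ))            # http://127.0.0.1/app/users/42
print(shift_path_info(environ))        # 'app'
print(shift_path_info(environ))        # 'users'
print(environ['SCRIPT_NAME'])          # /app/users
print(environ['PATH_INFO'])            # /42
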
apigee/edx-platform | common/djangoapps/student/migrations/0031_drop_student_anonymoususerid_temp_archive.py | 5 | 15979 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
db.execute("DROP TABLE student_anonymoususerid_temp_archive")
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.testcenterregistration': {
'Meta': {'object_name': 'TestCenterRegistration'},
'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.testcenteruser': {
'Meta': {'object_name': 'TestCenterUser'},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
symmetrical = True
| agpl-3.0 | 7,150,652,835,225,078,000 | 84.908602 | 182 | 0.550347 | false |
codrut3/tensorflow | tensorflow/compiler/tests/reduce_ops_test.py | 19 | 5418 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ReduceOpsTest(XLATestCase):
def _testReduction(self, tf_reduce_fn, np_reduce_fn, dtype, test_inputs,
rtol=1e-4, atol=1e-4):
"""Tests that the output of 'tf_reduce_fn' matches numpy's output."""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = tf_reduce_fn(a, index)
result = sess.run(out, {a: test_input, index: [0]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=0),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [-1]})
self.assertAllClose(result, np_reduce_fn(test_input, axis=1),
rtol=rtol, atol=atol)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [-33]})
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [2]})
FLOAT_DATA = [
np.zeros(shape=(2, 0)),
np.zeros(shape=(0, 30)),
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
COMPLEX_DATA = [
np.zeros(shape=(2, 0)).astype(np.complex64),
np.zeros(shape=(0, 30)).astype(np.complex64),
np.arange(1, 13, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-14, -2, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-4, 8, dtype=np.float32).view(np.complex64).reshape(2, 3),
]
NONEMPTY_FLOAT_DATA = [x for x in FLOAT_DATA if np.size(x) > 0]
NONEMPTY_COMPLEX_DATA = [x for x in COMPLEX_DATA if np.size(x) > 0]
BOOL_DATA = [
np.array([], dtype=np.bool).reshape(2, 0),
np.array([], dtype=np.bool).reshape(0, 3),
np.array([[False, True, False], [True, True, False]]),
]
def testReduceSumF32(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.float32,
self.FLOAT_DATA)
def testReduceSumC64(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.complex64,
self.COMPLEX_DATA)
def testReduceProdF32(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
self.FLOAT_DATA)
def testReduceProdC64(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.complex64,
self.COMPLEX_DATA)
def testReduceMin(self):
def reference_min(inp, axis):
"""Wrapper around np.amin that returns +infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
return np.amin(inp, axis)
self._testReduction(math_ops.reduce_min, reference_min, np.float32,
self.FLOAT_DATA)
def testReduceMax(self):
def reference_max(inp, axis):
"""Wrapper around np.amax that returns -infinity for an empty input."""
if inp.shape[axis] == 0:
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('-inf'))
return np.amax(inp, axis)
self._testReduction(math_ops.reduce_max, reference_max, np.float32,
self.FLOAT_DATA)
def testReduceMeanF32(self):
# TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
# reducing across zero inputs.
self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
self.NONEMPTY_FLOAT_DATA)
def testReduceMeanC64(self):
self._testReduction(math_ops.reduce_mean, np.mean, np.complex64,
self.NONEMPTY_COMPLEX_DATA)
def testReduceAll(self):
self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA)
def testReduceAny(self):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
if __name__ == '__main__':
googletest.main()
| apache-2.0 | 5,810,733,029,646,663,000 | 37.425532 | 80 | 0.63289 | false |
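A standalone NumPy sketch (outside the XLA test harness, array shape invented) of why the reference_min/reference_max wrappers above exist: np.amin raises on an empty reduction axis, whereas the XLA reductions under test return the identity element instead.

import numpy as np

empty = np.zeros(shape=(2, 0), dtype=np.float32)
try:
    np.amin(empty, axis=1)     # ValueError: zero-size array to reduction operation ...
except ValueError:
    pass
reference = np.full(empty.shape[:1], float('inf'))   # what reference_min returns: [inf, inf]
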
johndpope/tensorflow | tensorflow/compiler/tests/clustering_test.py | 123 | 3878 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the behavior of the auto-compilation pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
class ClusteringTest(XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.test_session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = output.eval()
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.test_session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = output.eval()
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.test_session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
def testHostMemory(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 6,364,113,217,970,979,000 | 36.650485 | 80 | 0.648272 | false |
s1lvester/heuteinmannheim | heuteinma.py | 1 | 6121 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import facebook
import websites
import feeds
#import beachstatus
from event import EventVault
import logging
import datetime
import time
import locale
locale.setlocale(locale.LC_TIME, '') # locale for date, time and the infamous German "Umlaute"
LOG_FILENAME = os.path.join(os.path.dirname(__file__), 'log.log')
logging.basicConfig(filename=LOG_FILENAME, level=logging.ERROR)
class HeuteInMannheim:
def __init__(self):
super(HeuteInMannheim, self).__init__()
self.vault = EventVault() # Initialize main Storage Object
# Initialize Scrapers
self.facebook_scraper = facebook.FacebookScraper(self.vault)
self.website_scraper = websites.WebsiteScraper(self.vault)
self.feed_scraper = feeds.FeedScraper(self.vault)
self.events = self.vault.get_events_for_date(datetime.date.today())
#self.events = self.vault.get_all_events() # Only for testing/debugging
#self.beach_status = beachstatus.BeachStatus()
#self.beach_status = self.beach_status.get_status()
self.state_output = self.make_html()
self.write_html() # Make initial index.html
logging.info("Total amount of Events: " + str(len(self.vault.get_all_events())))
def make_html(self):
"""Generate HTML output from collected events"""
output = """<!DOCTYPE html>
<html>
<head>
<title>Heute in Mannheim</title>
<link href="style.css" media="all" rel="stylesheet" type="text/css">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="description" content="Heute in Mannheim ist eine simple Website, die dir Events in Mannheim anzeigt. Unabhängig, werbefrei, unkommerziell, free as in freedom and free as in beer.">
<meta name="apple-mobile-web-app-capable" content="yes">
</head>
<body>
<table>\n"""
if not self.events: # Guess we're staying home tonight...
output += """<tr><td><p><span class=\"title\">Heute keine
Events.<br> Guess we're staying home tonight...
:-(</span></p></td></tr>\n"""
else:
eo = 0 # Even/Odd table-rows
for event in self.events:
if eo == 0:
output += " <tr class=\"even\">"
eo = 1
else:
output += " <tr class=\"odd\">"
eo = 0
                # Facebook icon by http://shimmi1.deviantart.com/ used to warn users about evil Facebook links
if event.get("event_url").find("facebook") > -1:
output_fb = "<img src=\"img/fb_ico.png\" alt=\"Achtung: Facebook Link!\">"
else:
output_fb = ""
output += """
<td><p><span class=\"title\"><a href=\"{}\">{} {}</a></span></p>
<span class=\"location\"><a href=\"{}\">{}</a></span><br>
<span class=\"adresse\">{} {} | {} {}</span></td>
<td><span class=\"zeit\">{}</span><br>
</tr>\n""".format(event.get("event_url"),
event.get("title"),
output_fb,
event.get("url"),
event.get("name"),
event.get("strasse"),
event.get("hausnr"),
event.get("plz"),
event.get("ort"),
event.get("uhrzeit"))
# output += """
# </table>
# <hr>
# <p><b>Status der Mannheimer Strände:</b></p>
# <table>"""
# for beach in self.beach_status:
# hours = ""
# if beach["status"] == "open":
# hours = str("<b>" + beach["hours_open"] + " - " + beach["hours_closed"] + "</b><br>")
# output += """
# <tr class=\"beach\">
# <td class=\"{}\">
# <span class=\"adresse"><a href=\"{}\">{}: {}</a></span><br>
# {}
# {} {} | {} {}
# </td>
# </tr>""".format(beach["status"],
# beach["event_obj"].get("url"),
# beach["event_obj"].get("name"),
# beach["status"],
# hours,
# beach["event_obj"].get("strasse"),
# beach["event_obj"].get("hausnr"),
# beach["event_obj"].get("plz"),
# beach["event_obj"].get("ort"))
output += """
</table>
<hr>
<p>Last update: {}</p>
<p><b><a href=\"imprint.html\">Contact, Impressum und Datenschutz</a></b></p>
<p class=\"footer\">Heute in Mannheim ist eine automatisch generierte
Website und wurde nach bestem Wissen und Gewissen erstellt. Die
Einträge wurden nicht redaktionell bearbeitet und ich übernehme
keinerlei Haftung für die Inhalte hinter den links</p>
<p class=\"footer\"><a href=\"https://github.com/s1lvester/heuteinmannheim\">Fork me on GitHub</a><br>Danke an die Jungs von <a href=\"http://heuteinstuttgart.de/\">heuteinstuttgart.de</a></p>
</body>
</html>""".format(time.strftime("%d.%m.%Y %H:%M", time.localtime()))
return output.encode("utf-8")
def write_html(self):
"""Write the index.html file. Requires self.state_output to be set"""
f = open(os.path.join(os.path.dirname(__file__), "static/index.html"),
"wb")
f.write(self.state_output)
f.close()
# Gooo !!!!11einself
main_obj = HeuteInMannheim()
| mit | 5,949,435,641,609,201,000 | 42.685714 | 204 | 0.483486 | false |
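make_html() only ever reads events through event.get(...), so a hedged sketch of the minimal per-event mapping it expects looks like this (keys taken from the calls above, every value invented):

sample_event = {
    'title': 'Konzert im Park',                    # linked headline
    'event_url': 'https://example.org/events/42',
    'name': 'Beispiel-Club',                       # venue name, linked via 'url'
    'url': 'https://example.org/venue',
    'strasse': 'Beispielstrasse',
    'hausnr': '1',
    'plz': '68159',
    'ort': 'Mannheim',
    'uhrzeit': '20:00',
}
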
fhools/doom3.gpl | neo/sys/gllog/logfunc.py | 62 | 2211 | #!/usr/bin/env python
# generate logging code
# this requires an analysis of the parameters to verbose them and to do the actual call
import sys, string, re
from read import read_gl
def do_logfunc(f_in, f_out):
(gl, wgl, glX) = read_gl(f_in)
for l in (gl, glX):
for t in l:
# process ret type to strip trailing spaces
t[0] = string.strip(t[0])
f_out.write('static %s APIENTRY log%s(%s) {\n' % ( t[0], t[2], t[3] ))
# work on parameters
base_params = string.split(t[3], ',')
#f_out.write('// %s\n' % repr(base_params))
# init format string and parameter list
params = []
format = t[1][1:] + t[2]
# a general help list
types = []
names = []
for i in base_params:
regex = re.compile('([a-zA-Z0-9]*)$')
name = regex.search(i).group(1)
type = string.strip(i[0:len(i)-len(name)])
# catch type with no name
if (len(type) == 0):
type = name
name = ''
#f_out.write('// type: "%s" name: "%s"\n' % (type, name))
types.append(type)
names.append(name)
# verbose the types
if (type == 'GLenum'):
format += ' %s'
params.append( 'EnumString(' + name + ')' )
elif (type == 'GLfloat' or type == 'GLclampf' or type == 'GLdouble'):
format += ' %g'
params.append( name )
elif (type == 'GLint' or type == 'GLuint' or type == 'GLsizei' or type == 'GLbyte' or type == 'GLshort'
or type == 'GLubyte' or type == 'GLushort'):
format += ' %d'
params.append( name )
elif (type == 'GLboolean'):
format += ' %s'
params.append( name + ' ? "Y" : "N"' )
elif (type == 'void'):
pass
else:
f_out.write('// unknown type: "%s" name: "%s"\n' % (type, name))
format += ' \'' + type + ' ' + name + '\''
f_out.write('\tfprintf( tr.logFile, "' + format + '\\n"')
for par in params:
f_out.write(', ' + par)
f_out.write(' );\n')
if (t[0] != 'void'):
f_out.write('\treturn dll%s(' % t[2])
else:
f_out.write('\tdll%s(' % t[2])
started = 0
for i in names:
if (started):
f_out.write(', ')
else:
started = 1
f_out.write(i)
f_out.write(');\n')
f_out.write('}\n\n')
if __name__ == '__main__':
do_logfunc(sys.stdin, sys.stdout)
| gpl-3.0 | -7,219,097,463,339,814,000 | 28.48 | 107 | 0.5346 | false |
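Given how the loop above assembles the fprintf format string and the dll call, the wrapper it writes to stdout for a hypothetical entry with return type void, name Enable and a single GLenum cap parameter would look roughly like the text below. The exact "gl" prefix in the log string comes from read_gl, which is not shown here, and APIENTRY, tr.logFile, EnumString and dllEnable are symbols assumed from the surrounding Doom 3 engine code.

static void APIENTRY logEnable(GLenum cap) {
	fprintf( tr.logFile, "glEnable %s\n", EnumString(cap) );
	dllEnable(cap);
}
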
ghtmtt/QGIS | tests/src/python/test_qgsrange.py | 23 | 23316 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRange
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '11.04.2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.testing import unittest
from qgis.core import (QgsIntRange,
QgsDoubleRange,
QgsDateRange)
from qgis.PyQt.QtCore import QDate
class TestQgsIntRange(unittest.TestCase):
def testGetters(self):
range = QgsIntRange(1, 11)
self.assertEqual(range.lower(), 1)
self.assertEqual(range.upper(), 11)
self.assertTrue(range.includeLower())
self.assertTrue(range.includeUpper())
range = QgsIntRange(-1, 3, False, False)
self.assertEqual(range.lower(), -1)
self.assertEqual(range.upper(), 3)
self.assertFalse(range.includeLower())
self.assertFalse(range.includeUpper())
def testIsInfinite(self):
range = QgsIntRange()
self.assertTrue(range.isInfinite())
range2 = QgsIntRange(range.lower(), 5)
self.assertFalse(range2.isInfinite())
range2 = QgsIntRange(5, range.upper())
self.assertFalse(range2.isInfinite())
def testEquality(self):
self.assertEqual(QgsIntRange(1, 10), QgsIntRange(1, 10))
self.assertNotEqual(QgsIntRange(1, 10), QgsIntRange(1, 11))
self.assertNotEqual(QgsIntRange(1, 10), QgsIntRange(2, 10))
self.assertNotEqual(QgsIntRange(1, 10, False), QgsIntRange(1, 10))
self.assertNotEqual(QgsIntRange(1, 10, True, False), QgsIntRange(1, 10))
def testIsEmpty(self):
range = QgsIntRange(1, 1)
# should not be empty because 1 is included
self.assertFalse(range.isEmpty())
range = QgsIntRange(1, 1, False, False)
# should be empty because 1 is NOT included
self.assertTrue(range.isEmpty())
# invalid range is empty
range = QgsIntRange(1, -1)
self.assertTrue(range.isEmpty())
def testIsSingleton(self):
range = QgsIntRange(1, 1)
self.assertTrue(range.isSingleton())
range = QgsIntRange(1, 10)
self.assertFalse(range.isSingleton())
range = QgsIntRange(1, 1, False, False)
# should not be singleton because 1 is NOT included
self.assertFalse(range.isSingleton())
# invalid range is not singleton
range = QgsIntRange(1, -1)
self.assertFalse(range.isSingleton())
def testContains(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertTrue(range.contains(QgsIntRange(1, 10)))
self.assertTrue(range.contains(QgsIntRange(0, 9)))
self.assertTrue(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
# does not include left end
range = QgsIntRange(0, 10, False, True)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertTrue(range.contains(QgsIntRange(1, 10)))
self.assertFalse(range.contains(QgsIntRange(0, 9)))
self.assertFalse(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
# does not include right end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.contains(QgsIntRange(1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 10)))
self.assertTrue(range.contains(QgsIntRange(0, 9)))
self.assertFalse(range.contains(QgsIntRange(0, 10)))
self.assertFalse(range.contains(QgsIntRange(-1, 9)))
self.assertFalse(range.contains(QgsIntRange(1, 11)))
def testContainsElement(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.contains(0))
self.assertTrue(range.contains(5))
self.assertTrue(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes left end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.contains(0))
self.assertTrue(range.contains(5))
self.assertFalse(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes right end
range = QgsIntRange(0, 10, False, True)
self.assertFalse(range.contains(0))
self.assertTrue(range.contains(5))
self.assertTrue(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
# includes neither end
range = QgsIntRange(0, 10, False, False)
self.assertFalse(range.contains(0))
self.assertTrue(range.contains(5))
self.assertFalse(range.contains(10))
self.assertFalse(range.contains(-1))
self.assertFalse(range.contains(11))
def testOverlaps(self):
# includes both ends
range = QgsIntRange(0, 10)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(10, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes left end
range = QgsIntRange(0, 10, True, False)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertFalse(range.overlaps(QgsIntRange(10, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes right end
range = QgsIntRange(0, 10, False, True)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(10, 11)))
self.assertFalse(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
# includes neither end
range = QgsIntRange(0, 10, False, False)
self.assertTrue(range.overlaps(QgsIntRange(1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(0, 9)))
self.assertTrue(range.overlaps(QgsIntRange(0, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 10)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 9)))
self.assertTrue(range.overlaps(QgsIntRange(1, 11)))
self.assertTrue(range.overlaps(QgsIntRange(-1, 11)))
self.assertFalse(range.overlaps(QgsIntRange(10, 11)))
self.assertFalse(range.overlaps(QgsIntRange(-1, 0)))
self.assertFalse(range.overlaps(QgsIntRange(-10, -1)))
self.assertFalse(range.overlaps(QgsIntRange(11, 12)))
class TestQgsDoubleRange(unittest.TestCase):
def testGetters(self):
range = QgsDoubleRange(1.0, 11.0)
self.assertEqual(range.lower(), 1)
self.assertEqual(range.upper(), 11)
self.assertTrue(range.includeLower())
self.assertTrue(range.includeUpper())
range = QgsDoubleRange(-1.0, 3.0, False, False)
self.assertEqual(range.lower(), -1)
self.assertEqual(range.upper(), 3)
self.assertFalse(range.includeLower())
self.assertFalse(range.includeUpper())
def testEquality(self):
self.assertEqual(QgsDoubleRange(1, 10), QgsDoubleRange(1, 10))
self.assertNotEqual(QgsDoubleRange(1, 10), QgsDoubleRange(1, 11))
self.assertNotEqual(QgsDoubleRange(1, 10), QgsDoubleRange(2, 10))
self.assertNotEqual(QgsDoubleRange(1, 10, False), QgsDoubleRange(1, 10))
self.assertNotEqual(QgsDoubleRange(1, 10, True, False), QgsDoubleRange(1, 10))
def testIsInfinite(self):
range = QgsDoubleRange()
self.assertTrue(range.isInfinite())
range2 = QgsDoubleRange(range.lower(), 5)
self.assertFalse(range2.isInfinite())
range2 = QgsDoubleRange(5, range.upper())
self.assertFalse(range2.isInfinite())
class TestQgsDateRange(unittest.TestCase):
def testGetters(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertEqual(range.begin(), QDate(2010, 3, 1))
self.assertEqual(range.end(), QDate(2010, 6, 2))
self.assertTrue(range.includeBeginning())
self.assertTrue(range.includeEnd())
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertFalse(range.begin().isValid())
self.assertEqual(range.end(), QDate(2010, 6, 2))
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertEqual(range.begin(), QDate(2010, 3, 1))
self.assertFalse(range.end().isValid())
def testIsEmpty(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertFalse(range.isEmpty())
# check QgsDateRange docs - this is treated as an infinite range, so is NOT empty
range = QgsDateRange(QDate(), QDate())
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2017, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1))
self.assertFalse(range.isEmpty())
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False)
self.assertTrue(range.isEmpty())
def testContains(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertFalse(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertFalse(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
# infinite left end
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertFalse(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
# infinite right end
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertFalse(range.contains(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.contains(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertFalse(range.contains(QgsDateRange(QDate(), QDate(2010, 4, 1))))
def testContainsElement(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertFalse(range.contains(QDate(2009, 6, 2)))
self.assertFalse(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
# infinite left end
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertTrue(range.contains(QDate(2009, 6, 2)))
self.assertFalse(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
# infinite right end
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.contains(QDate(2010, 3, 1)))
self.assertTrue(range.contains(QDate(2010, 5, 2)))
self.assertTrue(range.contains(QDate(2010, 6, 2)))
self.assertFalse(range.contains(QDate(2009, 6, 2)))
self.assertTrue(range.contains(QDate(2017, 6, 2)))
self.assertFalse(range.contains(QDate()))
def testOverlaps(self):
# includes both ends
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
range = QgsDateRange(QDate(), QDate(2010, 6, 2))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
range = QgsDateRange(QDate(2010, 3, 1), QDate())
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2010, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate(2017, 4, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2010, 4, 1), QDate())))
self.assertTrue(range.overlaps(QgsDateRange(QDate(), QDate(2010, 4, 1))))
self.assertFalse(range.overlaps(QgsDateRange(QDate(2009, 4, 1), QDate(2009, 8, 5))))
self.assertTrue(range.overlaps(QgsDateRange(QDate(2019, 4, 1), QDate(2019, 8, 5))))
def testIsInstant(self):
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2)).isInstant())
self.assertTrue(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1)).isInstant())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False).isInstant())
self.assertFalse(QgsDateRange(QDate(), QDate()).isInstant())
def testIsInfinite(self):
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2)).isInfinite())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1)).isInfinite())
self.assertFalse(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 3, 1), False, False).isInfinite())
self.assertTrue(QgsDateRange(QDate(), QDate()).isInfinite())
def testEquality(self):
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), True, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 3), False, False))
self.assertNotEqual(range, QgsDateRange(QDate(2010, 3, 2), QDate(2010, 6, 2), False, False))
def testExtend(self):
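        # extend() mutates the range in place and returns True only when the
        # other range actually changed the bounds or their inclusiveness.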
range_empty = QgsDateRange(QDate(2010, 6, 2), QDate(2010, 3, 1))
# Empty
self.assertFalse(range_empty.extend(range_empty))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertFalse(range.extend(range_empty))
range = QgsDateRange(QDate(2010, 6, 2), QDate(2010, 3, 1))
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False))
# Extend low
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), False, False))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 5, 2), True, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 6, 2), True, False))
# Extend high
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 7, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 7, 2), False, False))
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, True))
# Extend both
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 2, 1), QDate(2010, 7, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 2, 1), QDate(2010, 7, 2), False, False))
# Extend none
range = QgsDateRange(QDate(2010, 3, 1), QDate(2010, 6, 2), False, False)
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
# Test infinity
range = QgsDateRange(QDate(), QDate())
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
range = QgsDateRange(QDate(), QDate(2010, 5, 2))
self.assertFalse(range.extend(QgsDateRange(QDate(2010, 4, 6), QDate(2010, 5, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(2010, 5, 2), True, True))
range = QgsDateRange(QDate(2010, 4, 6), QDate())
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 6), QDate(2010, 5, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(2010, 3, 6), QDate(), False, True))
range = QgsDateRange(QDate(), QDate(2010, 5, 2))
self.assertTrue(range.extend(QgsDateRange(QDate(2010, 3, 6), QDate(2010, 6, 2), False, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(2010, 6, 2), True, False))
range = QgsDateRange(QDate(2010, 4, 6), QDate())
self.assertTrue(range.extend(QgsDateRange(QDate(), QDate(2010, 5, 2), True, False)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(), True, True))
range = QgsDateRange(QDate(), QDate(2010, 4, 6))
self.assertTrue(range.extend(QgsDateRange(QDate(), QDate(), True, True)))
self.assertEqual(range, QgsDateRange(QDate(), QDate(), True, True))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 2,931,418,365,467,184,000 | 50.243956 | 104 | 0.648868 | false |
AlCutter/certificate-transparency | python/ct/client/async_log_client_test.py | 8 | 16485 | #!/usr/bin/env trial
import gflags
import json
import mock
import sys
import urlparse
from ct.client import log_client
from ct.client import async_log_client
from ct.client import log_client_test_util as test_util
from ct.client.db import database
from twisted.internet import defer
from twisted.internet import task
from twisted.internet import reactor
from twisted.python import failure
from twisted.test import proto_helpers
from twisted.trial import unittest
FLAGS = gflags.FLAGS
class ResponseBodyHandlerTest(unittest.TestCase):
def test_send(self):
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
handler.dataReceived("test")
transport.loseConnection()
finished.addCallback(self.assertEqual, "test")
return finished
def test_send_chunks(self):
test_msg = "x"*1024
chunk_size = 100
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
sent = 0
while sent < len(test_msg):
handler.dataReceived(test_msg[sent:sent + chunk_size])
sent += chunk_size
transport.loseConnection()
finished.addCallback(self.assertEqual, test_msg)
return finished
def test_buffer_overflow(self):
original = FLAGS.response_buffer_size_bytes
FLAGS.response_buffer_size_bytes = 10
test_msg = "x"*11
finished = defer.Deferred()
handler = async_log_client.ResponseBodyHandler(finished)
transport = proto_helpers.StringTransportWithDisconnection()
handler.makeConnection(transport)
transport.protocol = handler
handler.dataReceived(test_msg)
transport.loseConnection()
# TODO(ekasper): find a more elegant and robust way to save flags.
FLAGS.response_buffer_size_bytes = original
return self.assertFailure(finished,
async_log_client.HTTPResponseSizeExceededError)
class AsyncLogClientTest(unittest.TestCase):
class FakeHandler(test_util.FakeHandlerBase):
# A class that mimics twisted.web.iweb.IResponse. Note: the IResponse
# interface is only partially implemented.
class FakeResponse(object):
def __init__(self, code, reason, json_content=None):
self.code = code
self.phrase = reason
self.headers = AsyncLogClientTest.FakeHandler.FakeHeader()
if json_content is not None:
self._body = json.dumps(json_content)
else:
self._body = ""
def deliverBody(self, protocol):
transport = proto_helpers.StringTransportWithDisconnection()
protocol.makeConnection(transport)
transport.protocol = protocol
protocol.dataReceived(self._body)
transport.loseConnection()
@classmethod
def make_response(cls, code, reason, json_content=None):
return cls.FakeResponse(code, reason, json_content=json_content)
class FakeHeader(object):
def getAllRawHeaders(self):
return []
# Twisted doesn't (yet) have an official fake Agent:
# https://twistedmatrix.com/trac/ticket/4024
class FakeAgent(object):
def __init__(self, responder):
self._responder = responder
def request(self, method, uri):
if method != "GET":
return defer.fail(failure.Failure())
# Naive, for testing.
path, _, params = uri.partition("?")
params = urlparse.parse_qs(params)
# Take the first value of each parameter.
if any([len(params[key]) != 1 for key in params]):
return defer.fail(failure.Failure())
params = {key: params[key][0] for key in params}
response = self._responder.get_response(path, params=params)
return defer.succeed(response)
class FakeDB(object):
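        # Fake entries DB: scan_entries raises by default, so the client has to
        # fall back to fetching; store_entries records what was written so the
        # tests can inspect it.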
def scan_entries(self, first, last):
raise database.KeyError("boom!")
def store_entries(self, entries):
self.entries = list(entries)
def setUp(self):
self.clock = task.Clock()
def one_shot_client(self, json_content):
"""Make a one-shot client and give it a mock response."""
mock_handler = mock.Mock()
response = self.FakeHandler.make_response(200, "OK",
json_content=json_content)
mock_handler.get_response.return_value = response
return async_log_client.AsyncLogClient(self.FakeAgent(mock_handler),
test_util.DEFAULT_URI,
reactor=self.clock)
def default_client(self, entries_db=None, reactor_=None):
# A client whose responder is configured to answer queries for the
# correct uri.
if reactor_ is None:
reactor_ = self.clock
return async_log_client.AsyncLogClient(self.FakeAgent(
self.FakeHandler(test_util.DEFAULT_URI)), test_util.DEFAULT_URI,
entries_db=entries_db,
reactor=reactor_)
def test_get_sth(self):
client = self.default_client()
self.assertEqual(test_util.DEFAULT_STH,
self.successResultOf(client.get_sth()))
def test_get_sth_raises_on_invalid_response(self):
json_sth = test_util.sth_to_json(test_util.DEFAULT_STH)
json_sth.pop("timestamp")
client = self.one_shot_client(json_sth)
return self.assertFailure(client.get_sth(),
log_client.InvalidResponseError)
def test_get_sth_raises_on_invalid_base64(self):
json_sth = test_util.sth_to_json(test_util.DEFAULT_STH)
json_sth["tree_head_signature"] = "garbagebase64^^^"
client = self.one_shot_client(json_sth)
return self.assertFailure(client.get_sth(),
log_client.InvalidResponseError)
class EntryConsumer(object):
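        # Minimal consumer: collects every delivered entry and fires the
        # 'consumed' Deferred once the producer signals completion via done().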
def __init__(self):
self.received = []
self.consumed = defer.Deferred()
def done(self, result):
self.result = result
self.consumed.callback("Done")
def consume(self, entries):
self.received += entries
d = defer.Deferred()
d.callback(None)
return d
# Helper method.
def get_entries(self, client, start, end, batch_size=0):
producer = client.get_entries(start, end, batch_size=batch_size)
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# Ensure the tasks scheduled in the reactor are invoked.
        # Since the start of get_entries is delayed, we have to pump to make up
        # for that delay. If a test forces get_entries to do more than one
        # fetch, that test has to take care of the additional pumping.
self.pump_get_entries()
return consumer
def pump_get_entries(self,
delay=None,
pumps=1):
if not delay:
delay = FLAGS.get_entries_retry_delay
# Helper method which advances time past get_entries delay
for _ in range(0, pumps):
self.clock.pump([0, delay])
def test_get_entries(self):
client = self.default_client()
consumer = self.get_entries(client, 0, 9)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_sth_consistency(self):
client = self.default_client()
self.assertEqual([],
self.successResultOf(client.get_sth_consistency(0, 9)))
def test_get_entries_raises_on_invalid_response(self):
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
json_entries["entries"][5]["leaf_input"] = "garbagebase64^^^"
client = self.one_shot_client(json_entries)
producer = client.get_entries(0, 9)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
        # pump through retries (with retries there are 2 delays per request and
        # an initial delay)
self.pump_get_entries(1, FLAGS.get_entries_max_retries * 2 + 1)
self.assertTrue(consumer.result.check(log_client.InvalidResponseError))
# The entire response should be discarded upon error.
self.assertFalse(consumer.received)
def test_get_entries_raises_on_too_large_response(self):
large_response = test_util.entries_to_json(
test_util.make_entries(4, 5))
client = self.one_shot_client(large_response)
producer = client.get_entries(4, 4)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# pump through retries (with retries there are 2 delays per request and
        # an initial delay)
self.pump_get_entries(1, FLAGS.get_entries_max_retries * 2 + 1)
self.assertTrue(consumer.result.check(log_client.InvalidResponseError))
    def test_get_entries_succeeds_after_retry(self):
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
json_entries["entries"][5]["leaf_input"] = "garbagebase64^^^"
client = self.one_shot_client(json_entries)
producer = client.get_entries(0, 9)
# remove exponential back-off
producer._calculate_retry_delay = lambda _: 1
consumer = self.EntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# pump retries halfway through (there are actually two delays before
        # firing requests, so this loop will only go through half of the retries)
self.pump_get_entries(1, FLAGS.get_entries_max_retries)
self.assertFalse(hasattr(consumer, 'result'))
json_entries = test_util.entries_to_json(test_util.make_entries(0, 9))
response = self.FakeHandler.make_response(200, "OK",
json_content=json_entries)
client._handler._agent._responder.get_response.return_value = response
self.pump_get_entries(1)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_entries_raises_if_query_is_larger_than_tree_size(self):
client = async_log_client.AsyncLogClient(
self.FakeAgent(self.FakeHandler(
test_util.DEFAULT_URI, tree_size=3)), test_util.DEFAULT_URI,
reactor=self.clock)
consumer = self.get_entries(client, 0, 9)
# also pump error
self.pump_get_entries()
self.assertTrue(consumer.result.check(log_client.HTTPClientError))
def test_get_entries_returns_all_in_batches(self):
mock_handler = mock.Mock()
fake_responder = self.FakeHandler(test_util.DEFAULT_URI)
mock_handler.get_response.side_effect = (
fake_responder.get_response)
client = async_log_client.AsyncLogClient(self.FakeAgent(mock_handler),
test_util.DEFAULT_URI,
reactor=self.clock)
consumer = self.get_entries(client, 0, 9, batch_size=4)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
self.assertEqual(3, len(mock_handler.get_response.call_args_list))
def test_get_entries_returns_all_for_limiting_server(self):
client = async_log_client.AsyncLogClient(
self.FakeAgent(
self.FakeHandler(test_util.DEFAULT_URI, entry_limit=3)),
test_util.DEFAULT_URI, reactor=self.clock)
consumer = self.get_entries(client, 0, 9)
# 1 pump in get_entries and 3 more so we fetch everything
self.pump_get_entries(pumps=3)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
class PausingConsumer(object):
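        # Consumer that pauses its registered producer once 'pause_at' entries
        # have been received, used to exercise pause/resume behaviour.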
def __init__(self, pause_at):
self.received = []
self.pause_at = pause_at
self.already_paused = False
self.result = None
def registerProducer(self, producer):
self.producer = producer
def done(self, result):
self.result = result
def consume(self, entries):
self.received += entries
if (not self.already_paused and
len(self.received) >= self.pause_at):
self.producer.pauseProducing()
self.already_paused = True
d = defer.Deferred()
d.callback(None)
return d
def test_get_entries_pause_resume(self):
client = self.default_client()
producer = client.get_entries(0, 9, batch_size=4)
consumer = self.PausingConsumer(4)
consumer.registerProducer(producer)
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
# fire all pending callbacks, and then fire request
self.pump_get_entries()
self.assertTrue(test_util.verify_entries(consumer.received, 0, 3))
self.assertEqual(4, len(consumer.received))
self.assertIsNone(consumer.result)
producer.resumeProducing()
# pump next 2 batches
self.pump_get_entries(pumps=2)
self.assertEqual(10, consumer.result)
self.assertTrue(test_util.verify_entries(consumer.received, 0, 9))
def test_get_entries_use_stored_entries(self):
fake_db = self.FakeDB()
        # if the client fetches entries instead of taking them from the db, it
        # will get entries 0 - 9. If it uses the db, it will get entries 10 - 19
fake_db.scan_entries = mock.Mock(
return_value=test_util.make_entries(10, 19))
client = self.default_client(entries_db=fake_db, reactor_=reactor)
consumer = self.get_entries(client, 0, 9)
consumer.consumed.addCallback(lambda _:
self.assertEqual(len(consumer.received), 10))
consumer.consumed.addCallback(lambda _:
[self.assertEqual(test_util.make_entry(i + 10), consumer.received[i])
for i in range(0, 9)])
def test_get_entries_tries_to_fetch_if_not_available_in_db(self):
fake_db = self.FakeDB()
fake_db.scan_entries = mock.Mock(return_value=None)
client = self.default_client(entries_db=fake_db)
consumer = self.get_entries(client, 0, 9)
test_util.verify_entries(consumer.received, 0, 9)
def test_get_entries_stores_entries(self):
fake_db = self.FakeDB()
client = self.default_client(entries_db=fake_db, reactor_=reactor)
consumer = self.get_entries(client, 0, 9)
consumer.consumed.addCallback(lambda _:
test_util.verify_entries(consumer.received, 0, 9))
consumer.consumed.addCallback(lambda _:
test_util.verify_entries(fake_db.entries, 0, 9))
return consumer.consumed
class BadEntryConsumer(EntryConsumer):
def consume(self, entries):
self.received += entries
d = defer.Deferred()
d.errback(ValueError("Boom!"))
return d
def test_get_entries_fires_done_if_consumer_raises(self):
client = self.default_client()
producer = client.get_entries(0, 9)
consumer = self.BadEntryConsumer()
d = producer.startProducing(consumer)
d.addBoth(consumer.done)
self.pump_get_entries()
self.assertTrue(consumer.result.check(ValueError))
if __name__ == "__main__" or __name__ == "ct.client.async_log_client_test":
sys.argv = FLAGS(sys.argv)
| apache-2.0 | 2,579,803,520,188,143,000 | 41.161125 | 81 | 0.617228 | false |
GuLinux/PySpectrum | import_image.py | 1 | 5892 | from pyui.import_image import Ui_ImportImage
from PyQt5.QtWidgets import QWidget, QToolBar, QDialog, QDialogButtonBox, QProgressDialog, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QCoreApplication
from qmathplotwidget import QMathPlotWidget, QImPlotWidget
import matplotlib.pyplot as plt
from qtcommons import QtCommons
from pyspectrum_commons import *
import os
import numpy as np
from astropy.io import fits
from object_properties_dialog import ObjectPropertiesDialog
from object_properties import ObjectProperties
from rotate_image_dialog import RotateImageDialog
from project import Project
class ImportImage(QWidget):
def icon():
return QIcon(':/image_20')
ACTION_TEXT = 'Import Image'
def pick(on_ok, settings):
open_file_sticky('Open FITS Image',FITS_IMG_EXTS, on_ok, settings, IMPORT_IMG )
def __init__(self, fits_file, settings, project = None):
super(ImportImage, self).__init__()
self.settings = settings
self.fits_file = fits_file
self.project = project
try:
image_hdu_index = fits_file.index_of('IMAGE')
except KeyError:
image_hdu_index = 0
original_image = fits.ImageHDU(data=fits_file[image_hdu_index].data, header=fits_file[image_hdu_index].header, name='IMAGE')
for hdu in [h for h in self.fits_file if h.name == 'IMAGE']: self.fits_file.remove(hdu)
self.fits_file.append(original_image)
self.ui = Ui_ImportImage()
self.ui.setupUi(self)
self.rotate_dialog = RotateImageDialog(self.fits_file, image_hdu_index, project=project)
self.rotate_dialog.rotated.connect(self.rotated)
self.image_plot = QtCommons.nestWidget(self.ui.image_widget, QImPlotWidget(self.rotate_dialog.data_rotated, cmap='gray'))
self.spatial_plot = QtCommons.nestWidget(self.ui.spatial_plot_widget, QMathPlotWidget())
self.spectrum_plot = QtCommons.nestWidget(self.ui.spectrum_plot_widget, QMathPlotWidget())
self.image_view = self.image_plot.axes_image
self.toolbar = QToolBar('Image Toolbar')
self.toolbar.addAction(QIcon(':/rotate_20'), "Rotate", lambda: self.rotate_dialog.show())
self.toolbar.addAction(QIcon(':/save_20'), "Save", self.save_profile)
self.toolbar.addAction(QIcon(':/select_all_20'), "Select spectrum data", lambda: self.spatial_plot.add_span_selector('select_spectrum', self.spectrum_span_selected,direction='horizontal'))
self.toolbar.addAction(QIcon.fromTheme('edit-select-invert'), "Select background data", lambda: self.spatial_plot.add_span_selector('select_background', self.background_span_selected,direction='horizontal', rectprops = dict(facecolor='blue', alpha=0.5))).setEnabled(False)
#self.toolbar.addAction('Stack', self.show_stack_images_dialog)
self.toolbar.addSeparator()
self.object_properties = ObjectProperties(self.fits_file, project=project)
self.object_properties_dialog = ObjectPropertiesDialog(settings, self.object_properties)
self.toolbar.addAction("Object properties", self.object_properties_dialog.show)
self.rotated()
def rotated(self):
self.image_view.set_data(self.rotate_dialog.data_rotated)
self.image_view.axes.relim()
self.image_view.axes.autoscale_view()
self.image_view.set_extent([self.rotate_dialog.data_rotated.shape[1],0, self.rotate_dialog.data_rotated.shape[0],0])
self.image_view.figure.canvas.draw()
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
self.draw_plot(self.spatial_plot.axes, self.spatial_profile())
def background_span_selected(self, min, max):
self.background_span_selection = (min, max)
self.spatial_plot.add_span('background_window', min, max, 'v', facecolor='gray', alpha=0.5)
self.image_plot.add_span('background_window', min, max, 'h', facecolor='red', alpha=0.5, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def spectrum_span_selected(self, min, max):
self.spectrum_span_selection = (min, max)
self.spatial_plot.add_span('spectrum_window', min, max, 'v', facecolor='g', alpha=0.5)
self.image_plot.add_span('spectrum_window', min, max, 'h', facecolor='y', alpha=0.25, clip_on=True)
self.draw_plot(self.spectrum_plot.axes, self.spectrum_profile())
def draw_plot(self, axes, data):
axes.clear()
axes.plot(data)
axes.figure.tight_layout()
axes.figure.canvas.draw()
def spatial_profile(self):
return self.rotate_dialog.data_rotated.sum(1)
def spectrum_profile(self):
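        # Collapse the rotated image along the spatial axis: sum only the rows
        # inside the selected spectrum window, or the whole frame when no
        # selection has been made yet.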
return self.rotate_dialog.data_rotated[self.spectrum_span_selection[0]:self.spectrum_span_selection[1]+1,:].sum(0) if hasattr(self, 'spectrum_span_selection') else self.rotate_dialog.data_rotated.sum(0)
def save(self, save_file):
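        # Normalise the extracted spectrum profile to the 0..1 range and write
        # it into the primary HDU of the FITS file.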
data = self.spectrum_profile()
data -= np.amin(data)
data /= np.amax(data)
hdu = self.fits_file[0]
hdu.data = data
hdu.header['ORIGIN'] = 'PySpectrum'
self.fits_file.writeto(save_file, clobber=True)
def save_profile(self):
if not self.project:
save_file_sticky('Save plot...', 'FITS file (.fit)', lambda f: self.save(f[0]), self.settings, RAW_PROFILE )
return
if not self.object_properties.name:
QMessageBox.information(self, 'Save FITS', 'Please set file information (name, date, etc) using the Object Properties button before saving')
return
file_path = self.project.add_file(Project.RAW_PROFILE, object_properties = self.object_properties, on_added=self.save)
#self.save(file_path)
| gpl-3.0 | -6,155,929,002,361,670,000 | 50.692982 | 280 | 0.672098 | false |
h-hirokawa/swampdragon | swampdragon/tests/test_selfpub_model.py | 1 | 3350 | from ..route_handler import ModelRouter
from ..pubsub_providers.base_provider import PUBACTIONS
from .dragon_test_case import DragonTestCase
from .models import FooSelfPub, BarSelfPub
from .serializers import FooSelfPubSerializer, BarSelfPubSerializer
from datetime import datetime
class FooModelRouter(ModelRouter):
serializer_class = FooSelfPubSerializer
class BarModelRouter(ModelRouter):
serializer_class = BarSelfPubSerializer
class TestSelfPubModel(DragonTestCase):
def test_self_pub_model(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
self.assertIsNone(self.connection.last_pub)
FooSelfPub.objects.create(name='test')
self.assertIsNotNone(self.connection.last_pub)
def test_self_pub_model_with_fk(self):
router = BarModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
self.assertIsNone(self.connection.last_pub)
foo = FooSelfPub.objects.create(name='test')
BarSelfPub.objects.create(date=datetime.now(), foo=foo)
self.assertIsNotNone(self.connection.last_pub)
def test_ignore_id_when_getting_updated_fields(self):
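        # Smoke test: publishing the 'pk' field must not break creation, since
        # the id is expected to be ignored when collecting updated fields.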
FooSelfPubSerializer.Meta.publish_fields += ('pk', )
foo = FooSelfPub.objects.create(name='test')
def test_get_changes(self):
foo = FooSelfPub.objects.create(name='test')
self.assertListEqual(foo.get_changed_fields(), [])
foo.number = 12
self.assertListEqual(foo.get_changed_fields(), ['number'])
foo.name = 'updated'
self.assertIn('number', foo.get_changed_fields())
self.assertIn('name', foo.get_changed_fields())
bar = BarSelfPub.objects.create(date=datetime.now(), foo=foo)
self.assertListEqual(bar.get_changed_fields(), [])
update_date = datetime.now()
bar.date = update_date
self.assertListEqual(bar.get_changed_fields(), ['date'])
def test_raise_validation_error(self):
foo = FooSelfPub.objects.create(name='test')
data = foo.serialize()
self.assertEqual(data['name'], foo.name)
def test_create(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
FooSelfPub.objects.create(name='test')
self.assertEqual(self.connection.last_pub['action'], 'created')
def test_update(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan'})
foo = FooSelfPub.objects.create(name='test')
foo.name = 'updated'
foo.save()
self.assertEqual(self.connection.last_pub['action'], 'updated')
def test_remove_on_update(self):
router = FooModelRouter(self.connection)
router.subscribe(**{'channel': 'testchan', 'name__contains': 'findme'})
foo = FooSelfPub.objects.create(name='test')
self.assertIsNone(self.connection.last_pub)
foo.name = 'findme'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.updated)
foo.name = 'hideme'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.deleted)
foo.name = 'findmeagain'
foo.save()
self.assertEqual(self.connection.last_pub['action'], PUBACTIONS.updated)
| bsd-3-clause | -1,348,968,510,869,027,600 | 37.953488 | 80 | 0.668955 | false |
Kalyzee/edx-platform | common/djangoapps/xblock_django/tests/test_user_service.py | 132 | 3992 | """
Tests for the DjangoXBlockUserService.
"""
from django.test import TestCase
from xblock_django.user_service import (
DjangoXBlockUserService,
ATTR_KEY_IS_AUTHENTICATED,
ATTR_KEY_USER_ID,
ATTR_KEY_USERNAME,
ATTR_KEY_USER_IS_STAFF,
)
from student.models import anonymous_id_for_user
from student.tests.factories import UserFactory, AnonymousUserFactory
from opaque_keys.edx.keys import CourseKey
class UserServiceTestCase(TestCase):
"""
Tests for the DjangoXBlockUserService.
"""
def setUp(self):
super(UserServiceTestCase, self).setUp()
self.user = UserFactory(username="tester", email="[email protected]")
self.user.profile.name = "Test Tester"
self.anon_user = AnonymousUserFactory()
def assert_is_anon_xb_user(self, xb_user):
"""
A set of assertions for an anonymous XBlockUser.
"""
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertIsNone(xb_user.full_name)
self.assertListEqual(xb_user.emails, [])
def assert_xblock_user_matches_django(self, xb_user, dj_user):
"""
        A set of assertions for comparing an XBlockUser to a django User
"""
self.assertTrue(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertEqual(xb_user.emails[0], dj_user.email)
self.assertEqual(xb_user.full_name, dj_user.profile.name)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USERNAME], dj_user.username)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USER_ID], dj_user.id)
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_USER_IS_STAFF])
def test_convert_anon_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is AnonymousUser.
"""
django_user_service = DjangoXBlockUserService(self.anon_user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_is_anon_xb_user(xb_user)
def test_convert_authenticate_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is User.
"""
django_user_service = DjangoXBlockUserService(self.user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_xblock_user_matches_django(xb_user, self.user)
def test_get_anonymous_user_id_returns_none_for_non_staff_users(self):
"""
Tests for anonymous_user_id method to return None if user is Non-Staff.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=False)
anonymous_user_id = django_user_service.get_anonymous_user_id(username=self.user.username, course_id='edx/toy/2012_Fall')
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_none_for_non_existing_users(self):
"""
        Tests for anonymous_user_id method to return None if the username does not exist in the system.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(username="No User", course_id='edx/toy/2012_Fall')
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_id_for_existing_users(self):
"""
        Tests that the anonymous_user_id method returns the anonymous user id for an existing user.
"""
course_key = CourseKey.from_string('edX/toy/2012_Fall')
anon_user_id = anonymous_id_for_user(
user=self.user,
course_id=course_key,
save=True
)
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(
username=self.user.username,
course_id='edX/toy/2012_Fall'
)
self.assertEqual(anonymous_user_id, anon_user_id)
| agpl-3.0 | -8,796,746,495,746,789,000 | 39.323232 | 129 | 0.667836 | false |
redhat-openstack/ironic | ironic/tests/conf_fixture.py | 11 | 1431 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_config import cfg
from ironic.common import config
CONF = cfg.CONF
CONF.import_opt('host', 'ironic.common.service')
class ConfFixture(fixtures.Fixture):
"""Fixture to manage global conf settings."""
def __init__(self, conf):
self.conf = conf
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('verbose', True)
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
| apache-2.0 | 3,040,773,665,392,038,000 | 34.775 | 78 | 0.701607 | false |
MingdaZhou/gnuradio | gr-vocoder/examples/codec2_audio_loopback.py | 47 | 1553 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
from gnuradio.vocoder import codec2
def build_graph():
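    # Loopback chain: audio source -> scale to 16-bit range -> float_to_short
    # -> Codec2 encode (2400 bps mode) -> Codec2 decode -> short_to_float
    # -> rescale -> audio sink.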
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.codec2_encode_sp(codec2.MODE_2400)
dec = vocoder.codec2_decode_ps(codec2.MODE_2400)
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0/32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
if __name__ == '__main__':
tb = build_graph()
tb.start()
raw_input ('Press Enter to exit: ')
tb.stop()
tb.wait()
| gpl-3.0 | -1,516,148,865,656,601,000 | 32.042553 | 70 | 0.710882 | false |
AakashRaina/radpress | radpress/urls.py | 3 | 1287 | from django.conf.urls import patterns, url
from radpress.views import (
ArticleArchiveView, ArticleDetailView, ArticleListView, PreviewView,
PageDetailView, SearchView, ZenModeView, ZenModeUpdateView)
from radpress.feeds import ArticleFeed
urlpatterns = patterns(
'',
url(r'^$',
view=ArticleListView.as_view(),
name='radpress-article-list'),
url(r'^archives/$',
view=ArticleArchiveView.as_view(),
name='radpress-article-archive'),
url(r'^detail/(?P<slug>[-\w]+)/$',
view=ArticleDetailView.as_view(),
name='radpress-article-detail'),
url(r'^p/(?P<slug>[-\w]+)/$',
view=PageDetailView.as_view(),
name='radpress-page-detail'),
url(r'^preview/$',
view=PreviewView.as_view(),
name='radpress-preview'),
url(r'^search/$',
view=SearchView.as_view(),
name='radpress-search'),
url(r'^zen/$',
view=ZenModeView.as_view(),
name='radpress-zen-mode'),
url(r'zen/(?P<pk>\d+)/$',
view=ZenModeUpdateView.as_view(),
name='radpress-zen-mode-update'),
url(r'^rss/$',
view=ArticleFeed(),
name='radpress-rss'),
url(r'^rss/(?P<tags>[-/\w]+)/$',
view=ArticleFeed(),
name='radpress-rss')
)
| mit | 8,750,976,305,379,518,000 | 25.265306 | 72 | 0.587413 | false |
admcrae/tensorflow | tensorflow/examples/adding_an_op/zero_out_2_test.py | 111 | 1988 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 2 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_grad_2 # pylint: disable=unused-import
from tensorflow.examples.adding_an_op import zero_out_op_2
class ZeroOut2Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = zero_out_op_2.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def test_2d(self):
with self.test_session():
result = zero_out_op_2.zero_out([[6, 5, 4], [3, 2, 1]])
self.assertAllEqual(result.eval(), [[6, 0, 0], [0, 0, 0]])
def test_grad(self):
with self.test_session():
shape = (5,)
x = tf.constant([5, 4, 3, 2, 1], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
def test_grad_2d(self):
with self.test_session():
shape = (2, 3)
x = tf.constant([[6, 5, 4], [3, 2, 1]], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 3,552,741,396,103,948,000 | 32.694915 | 93 | 0.634809 | false |
mszewczy/odoo | addons/board/__openerp__.py | 261 | 1647 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dashboards',
'version': '1.0',
'category': 'Hidden',
'description': """
Lets the user create a custom dashboard.
========================================
Allows users to create custom dashboards.
""",
'author': 'OpenERP SA',
'depends': ['base', 'web'],
'data': [
'security/ir.model.access.csv',
'board_view.xml',
'board_mydashboard_view.xml',
'views/board.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,224,184,009,156,844,000 | 35.6 | 78 | 0.576806 | false |
ecino/compassion-modules | thankyou_letters/__manifest__.py | 2 | 2325 | # -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2016-2020 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
'name': 'Thank You Letters',
'version': '10.0.2.1.1',
'category': 'Other',
'author': 'Compassion CH',
'license': 'AGPL-3',
'website': 'http://www.compassion.ch',
'depends': [
'partner_communication',
'advanced_translation',
'web_widget_digitized_signature',
],
'data': [
'security/ir.model.access.csv',
'report/donation_report.xml',
'data/email_template.xml',
'data/communication_config.xml',
'data/ir_cron.xml',
'views/success_story_view.xml',
'views/communication_job_view.xml',
'views/account_invoice_view.xml',
'views/product_view.xml',
'views/res_partner_view.xml',
'views/thankyou_config_view.xml',
'views/generate_communication_wizard_view.xml',
],
'demo': [
'demo/demo_data.xml'
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 | -7,162,171,920,646,610,000 | 37.114754 | 78 | 0.504086 | false |
Hazelsuko07/17WarmingUp | py3.6/lib/python3.6/site-packages/pip/operations/check.py | 342 | 1590 |
def check_requirements(installed_dists):
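    """Return two dicts keyed by '<project>==<version>': one mapping each
    installed distribution to its missing requirements, the other to its
    requirements installed at an incompatible version.
    """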
missing_reqs_dict = {}
incompatible_reqs_dict = {}
for dist in installed_dists:
key = '%s==%s' % (dist.project_name, dist.version)
missing_reqs = list(get_missing_reqs(dist, installed_dists))
if missing_reqs:
missing_reqs_dict[key] = missing_reqs
incompatible_reqs = list(get_incompatible_reqs(
dist, installed_dists))
if incompatible_reqs:
incompatible_reqs_dict[key] = incompatible_reqs
return (missing_reqs_dict, incompatible_reqs_dict)
def get_missing_reqs(dist, installed_dists):
"""Return all of the requirements of `dist` that aren't present in
`installed_dists`.
"""
installed_names = set(d.project_name.lower() for d in installed_dists)
missing_requirements = set()
for requirement in dist.requires():
if requirement.project_name.lower() not in installed_names:
missing_requirements.add(requirement)
yield requirement
def get_incompatible_reqs(dist, installed_dists):
"""Return all of the requirements of `dist` that are present in
`installed_dists`, but have incompatible versions.
"""
installed_dists_by_name = {}
for installed_dist in installed_dists:
installed_dists_by_name[installed_dist.project_name] = installed_dist
for requirement in dist.requires():
present_dist = installed_dists_by_name.get(requirement.project_name)
if present_dist and present_dist not in requirement:
yield (requirement, present_dist)
| mit | -401,628,731,427,761,700 | 31.44898 | 77 | 0.666038 | false |
DefyVentures/edx-platform | lms/djangoapps/survey/models.py | 81 | 7589 | """
Models to support Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
"""
Model to define a Survey Form that contains the HTML form data
that is presented to the end user. A SurveyForm is not tied to
a particular run of a course, to allow for sharing of Surveys
across courses
"""
name = models.CharField(max_length=255, db_index=True, unique=True)
form = models.TextField()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
"""
Override save method so we can validate that the form HTML is
actually parseable
"""
self.validate_form_html(self.form)
# now call the actual save method
super(SurveyForm, self).save(*args, **kwargs)
@classmethod
def validate_form_html(cls, html):
"""
Makes sure that the html that is contained in the form field is valid
"""
try:
fields = cls.get_field_names_from_html(html)
except Exception as ex:
log.exception("Cannot parse SurveyForm html: {}".format(ex))
raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
if not len(fields):
raise ValidationError("SurveyForms must contain at least one form input field")
@classmethod
def create(cls, name, form, update_if_exists=False):
"""
Helper class method to create a new Survey Form.
update_if_exists=True means that if a form already exists with that name, then update it.
        Otherwise throw a SurveyFormNameAlreadyExists exception
"""
survey = cls.get(name, throw_if_not_found=False)
if not survey:
survey = SurveyForm(name=name, form=form)
else:
if update_if_exists:
survey.form = form
else:
raise SurveyFormNameAlreadyExists()
survey.save()
return survey
@classmethod
def get(cls, name, throw_if_not_found=True):
"""
        Helper class method to look up a Survey Form; throws SurveyFormNotFound if it does not exist
        in the database, unless throw_if_not_found=False, in which case we return None
"""
survey = None
exists = SurveyForm.objects.filter(name=name).exists()
if exists:
survey = SurveyForm.objects.get(name=name)
elif throw_if_not_found:
raise SurveyFormNotFound()
return survey
def get_answers(self, user=None, limit_num_users=10000):
"""
Returns all answers for all users for this Survey
"""
return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
def has_user_answered_survey(self, user):
"""
Returns whether a given user has supplied answers to this
survey
"""
return SurveyAnswer.do_survey_answers_exist(self, user)
def save_user_answers(self, user, answers):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
SurveyAnswer.save_answers(self, user, answers)
def get_field_names(self):
"""
Returns a list of defined field names for all answers in a survey. This can be
        helpful for reporting-like features, e.g. adding headers to the reports.
This is taken from the set of <input> fields inside the form.
"""
return SurveyForm.get_field_names_from_html(self.form)
@classmethod
def get_field_names_from_html(cls, html):
"""
Returns a list of defined field names from a block of HTML
"""
names = []
        # make sure the form is wrapped in some outer single element
# otherwise lxml can't parse it
# NOTE: This wrapping doesn't change the ability to query it
tree = etree.fromstring(u'<div>{}</div>'.format(html))
input_fields = tree.findall('.//input') + tree.findall('.//select')
for input_field in input_fields:
if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
names.append(input_field.attrib['name'])
return names
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
        The return value is a nested dict of simple name/value pairs, keyed by
        user id. For example (where 'field3' is an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
limit_num_users is to prevent an unintentional huge, in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
if user_id not in results and num_users < limit_num_users:
results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
IMPORTANT: There is no validaton of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
answer, __ = SurveyAnswer.objects.get_or_create(user=user, form=form, field_name=name)
answer.field_value = value
answer.save()
| agpl-3.0 | 4,452,996,037,420,719,600 | 32.879464 | 106 | 0.617868 | false |
INM-6/python-neo | neo/io/neuralynxio_v1.py | 2 | 105289 | """
Class for reading data from Neuralynx files.
This IO supports NCS, NEV and NSE file formats.
This module is an older implementation using the old neo.io API.
A newer NeuralynxIO class, built from NeuralynxRawIO and BaseFromIO,
supersedes this one.
Depends on: numpy
Supported: Read
Author: Julia Sprenger, Carlos Canova
Adapted from the exampleIO of python-neo
"""
import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
import neo.io.neuralynxio
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize
import hashlib
import pickle
if hasattr(pkg_resources, 'pkg_resources'):
parse_version = pkg_resources.pkg_resources.parse_version
else:
parse_version = pkg_resources.parse_version
class NeuralynxIO(BaseIO):
"""
Class for reading Neuralynx files.
It enables reading:
- :class:'Block'
- :class:'Segment'
- :class:'AnalogSignal'
- :class:'SpikeTrain'
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
events=True)
seg = block.segments[0]
analogsignal = seg.analogsignals[0]
plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
plt.show()
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
# This class can return either a Block or a Segment
# The first one is the default ( self.read )
# These lists should go from highest object to lowest object because
# common_io_test assumes it.
readable_objects = [Segment, AnalogSignal, SpikeTrain]
# This class is not able to write objects
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff : a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {
Segment: [('waveforms', {'value': True})],
Block: [('waveforms', {'value': False})]
}
# do not supported write so no GUI stuff
write_params = None
name = 'Neuralynx'
    description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
                  'Cheetah) recording system (tetrodes).'
extensions = ['nse', 'ncs', 'nev', 'ntt']
# mode can be 'file' or 'dir' or 'fake' or 'database'
# the main case is 'file' but some reader are base on a directory or
# a database this info is for GUI stuff also
mode = 'dir'
# hardcoded parameters from manual, which are not present in Neuralynx
# data files
# unit of timestamps in different files
nev_time_unit = pq.microsecond
ncs_time_unit = pq.microsecond
nse_time_unit = pq.microsecond
ntt_time_unit = pq.microsecond
# unit of sampling rate in different files
ncs_sr_unit = pq.Hz
nse_sr_unit = pq.Hz
ntt_sr_unit = pq.Hz
def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
print_diagnostic=False, filename=None):
"""
        Arguments:
            sessiondir: the directory in which the files of the recording
                session are collected. Default 'None'.
            print_diagnostic: indicates whether information about the loading
                of data is printed to the terminal or not. Default 'False'.
            cachedir: the directory where metadata about the recording session
                is read from and written to.
            use_cache: method used for cache identification. Possible values:
                'hash'/'always'/'datesize'/'never'. Default 'hash'.
            filename: this argument is handled the same as sessiondir and is
                only added for external IO interfaces. The value of sessiondir
                has priority over filename.
"""
warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.'
''.format(self.__class__, neo.io.neuralynxio.NeuralynxIO), FutureWarning)
BaseIO.__init__(self)
# possiblity to provide filename instead of sessiondir for IO
# compatibility
if filename is not None and sessiondir is None:
sessiondir = filename
if sessiondir is None:
            raise ValueError('Must provide a directory containing data files '
                             'of one recording session.')
# remove filename if specific file was passed
if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
sessiondir = sessiondir[:sessiondir.rfind(sep)]
# remove / for consistent directory handling
if sessiondir.endswith(sep):
sessiondir = sessiondir.rstrip(sep)
# set general parameters of this IO
self.sessiondir = sessiondir
self.filename = sessiondir.split(sep)[-1]
self._print_diagnostic = print_diagnostic
self.associated = False
self._associate(cachedir=cachedir, usecache=use_cache)
self._diagnostic_print(
'Initialized IO for session %s' % self.sessiondir)
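    # Minimal construction sketch (hypothetical paths; cache options as
    # documented in __init__ above, see also the class docstring example):
    #
    #     from neo import io
    #     NIO = io.NeuralynxIO(sessiondir='../Data/2014-07-24_10-31-02',
    #                          cachedir='../Data/cache',
    #                          use_cache='datesize',
    #                          print_diagnostic=True)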
def read_block(self, lazy=False, cascade=True, t_starts=None,
t_stops=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False,
waveforms=False):
"""
Reads data in a requested time window and returns block with as many
segments
es necessary containing these data.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (segments).
Default 'True'.
t_starts : list of quantities or quantity describing the start of
the requested time window to load. If None or [None]
the complete session is loaded. Default 'None'.
t_stops : list of quantities or quantity describing the end of the
requested time window to load. Has to contain the
same number of values as t_starts. If None or [None]
the complete session is loaded. Default 'None'.
            electrode_list : list of integers containing the IDs of the
                            channels requested to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded.
Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns: Block object containing the requested data in neo structures.
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(lazy = False, cascade = True,
t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
electrode_list = [1,5,10],
unit_list = [1,2,3],
events = True, waveforms = True)
plt.plot(block.segments[0].analogsignals[0])
plt.show()
"""
# Create block
bl = Block(file_origin=self.sessiondir)
bl.name = self.filename
if not cascade:
return bl
# Checking input of t_start and t_stop
# For lazy users that specify x,x instead of [x],[x] for t_starts,
# t_stops
if t_starts is None:
t_starts = [None]
elif type(t_starts) == pq.Quantity:
t_starts = [t_starts]
elif type(t_starts) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_starts]):
raise ValueError('Invalid specification of t_starts.')
if t_stops is None:
t_stops = [None]
elif type(t_stops) == pq.Quantity:
t_stops = [t_stops]
elif type(t_stops) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_stops]):
raise ValueError('Invalid specification of t_stops.')
# adapting t_starts and t_stops to known gap times (extracted in
# association process / initialization)
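        # Illustration (assuming a single gap from 10 s to 12 s relative to
        # the global recording start): requesting t_starts=[None],
        # t_stops=[None] is split by the loop below into two windows,
        # t_starts=[None, 12 s] and t_stops=[10 s, None], so that no segment
        # spans the gap.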
for gap in self.parameters_global['gaps']:
# gap=gap_list[0]
for e in range(len(t_starts)):
t1, t2 = t_starts[e], t_stops[e]
gap_start = gap[1] * self.ncs_time_unit - \
self.parameters_global['t_start']
gap_stop = gap[2] * self.ncs_time_unit - self.parameters_global[
't_start']
if ((t1 is None and t2 is None)
or (t1 is None and t2 is not None and t2.rescale(
self.ncs_time_unit) > gap_stop)
or (t2 is None and t1 is not None and t1.rescale(
self.ncs_time_unit) < gap_stop)
or (t1 is not None and t2 is not None and t1.rescale(
self.ncs_time_unit) < gap_start
and t2.rescale(self.ncs_time_unit) > gap_stop)):
# adapting first time segment
t_stops[e] = gap_start
# inserting second time segment
t_starts.insert(e + 1, gap_stop)
t_stops.insert(e + 1, t2)
warnings.warn(
'Substituted t_starts and t_stops in order to skip '
'gap in recording session.')
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
# adding a segment for each t_start, t_stop pair
for t_start, t_stop in zip(t_starts, t_stops):
seg = self.read_segment(lazy=lazy, cascade=cascade,
t_start=t_start, t_stop=t_stop,
electrode_list=electrode_list,
unit_list=unit_list,
analogsignals=analogsignals, events=events,
waveforms=waveforms)
bl.segments.append(seg)
# generate units
units = []
channel_unit_collection = {}
for st in [s for seg in bl.segments for s in seg.spiketrains]:
# collecting spiketrains of same channel and unit id to generate
# common unit
chuid = (st.annotations['channel_index'], st.annotations['unit_id'])
if chuid in channel_unit_collection:
channel_unit_collection[chuid].append(st)
else:
channel_unit_collection[chuid] = [st]
for chuid in channel_unit_collection:
sts = channel_unit_collection[chuid]
unit = Unit(name='Channel %i, Unit %i' % chuid)
unit.spiketrains.extend(sts)
units.append(unit)
        # generate one ChannelIndex for each analogsignal
for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
channelids = anasig.annotations['channel_index']
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all analogsignal '
'"%s"' % anasig.name,
channel_ids=channelids)
channelidx.analogsignals.append(anasig)
bl.channel_indexes.append(channelidx)
# generate channel indexes for units
channelids = [unit.spiketrains[0].annotations['channel_index']
for unit in units]
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all spiketrains',
channel_ids=channelids)
channelidx.units.extend(units)
bl.channel_indexes.append(channelidx)
bl.create_many_to_one_relationship()
# Adding global parameters to block annotation
bl.annotations.update(self.parameters_global)
return bl
def read_segment(self, lazy=False, cascade=True, t_start=None, t_stop=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False, waveforms=False):
"""Reads one Segment.
The Segment will contain one AnalogSignal for each channel
and will go from t_start to t_stop.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (SpikeTrains,
AnalogSignals, Events).
Default 'True'.
t_start : time (quantity) that the Segment begins. Default None.
t_stop : time (quantity) that the Segment ends. Default None.
            electrode_list : list of integers containing the IDs of the
                            channels requested to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded. If False, no unit will be loaded.
Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns:
Segment object containing neo objects, which contain the data.
"""
# input check
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
elif electrode_list is None:
raise ValueError('Electrode_list can not be None.')
elif [v for v in electrode_list if
v in self.parameters_ncs.keys()] == []:
            # warn if none of the requested channels are present in this session
warnings.warn('Requested channels %s are not present in session '
'(contains only %s)' % (
electrode_list, self.parameters_ncs.keys()))
electrode_list = []
seg = Segment(file_origin=self.filename)
if not cascade:
return seg
# generate empty segment for analogsignal collection
empty_seg = Segment(file_origin=self.filename)
# Reading NCS Files #
# selecting ncs files to load based on electrode_list requested
if analogsignals:
for chid in electrode_list:
if chid in self.parameters_ncs:
file_ncs = self.parameters_ncs[chid]['filename']
self.read_ncs(file_ncs, empty_seg, lazy, cascade,
t_start=t_start, t_stop=t_stop)
else:
self._diagnostic_print('Can not load ncs of channel %i. '
'No corresponding ncs file '
'present.' % (chid))
            # supplementary merge function, should be replaced by a neo
            # utility function
def merge_analogsignals(anasig_list):
for aid, anasig in enumerate(anasig_list):
anasig.channel_index = None
if aid == 0:
full_analogsignal = anasig
else:
full_analogsignal = full_analogsignal.merge(anasig)
for key in anasig_list[0].annotations.keys():
listified_values = [a.annotations[key] for a in anasig_list]
full_analogsignal.annotations[key] = listified_values
return full_analogsignal
analogsignal = merge_analogsignals(empty_seg.analogsignals)
seg.analogsignals.append(analogsignal)
analogsignal.segment = seg
# Reading NEV Files (Events)#
# reading all files available
if events:
for filename_nev in self.nev_asso:
self.read_nev(filename_nev, seg, lazy, cascade, t_start=t_start,
t_stop=t_stop)
# Reading Spike Data only if requested
if unit_list is not False:
# Reading NSE Files (Spikes)#
# selecting nse files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_nse:
filename_nse = self.parameters_nse[chid]['filename']
self.read_nse(filename_nse, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load nse of channel %i. '
'No corresponding nse file '
'present.' % (chid))
# Reading ntt Files (Spikes)#
# selecting ntt files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_ntt:
filename_ntt = self.parameters_ntt[chid]['filename']
self.read_ntt(filename_ntt, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load ntt of channel %i. '
'No corresponding ntt file '
'present.' % (chid))
return seg
def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reading a single .ncs file from the associated Neuralynx recording
session.
In case of a recording gap between t_start and t_stop, data are only
loaded until gap start.
For loading data across recording gaps use read_block(...).
Arguments:
filename_ncs : Name of the .ncs file to be loaded.
seg : Neo Segment, to which the AnalogSignal containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
AnalogSignal. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
AnalogSignal begins.
Default None.
t_stop : time or sample (quantity or integer) that the
AnalogSignal ends.
Default None.
Returns:
None
'''
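        # Usage sketch (hypothetical file name; seg is an existing, possibly
        # empty, Segment of this session):
        #
        #     NIO.read_ncs('CSC1.ncs', seg, t_start=0 * pq.s, t_stop=1 * pq.s)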
# checking format of filename and correcting if necessary
if filename_ncs[-4:] != '.ncs':
filename_ncs = filename_ncs + '.ncs'
if sep in filename_ncs:
filename_ncs = filename_ncs.split(sep)[-1]
# Extracting the channel id from prescan (association) of ncs files with
# this recording session
chid = self.get_channel_id_by_file_name(filename_ncs)
if chid is None:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ncs))
if not cascade:
return
# read data
header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
data = self.__mmap_ncs_data(filename_ncs)
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
if isinstance(t_start, int):
t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
if isinstance(t_stop, int):
t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']
# rescaling to global start time of recording (time of first sample
# in any file type)
if t_start is None or t_start < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ncs[chid]['t_start'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ncs[chid]['t_stop']
                    - self.parameters_global['t_start']),
                   filename_ncs))
if t_stop is None or t_stop > (
self.parameters_ncs[chid]['t_stop']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ncs[chid]['t_stop'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ncs[chid]['t_start']
- self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ncs[chid]['t_start']
                    - self.parameters_global['t_start']),
                   filename_ncs))
        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than or equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ncs))
# Extracting data signal in requested time window
unit = pq.dimensionless # default value
if lazy:
sig = []
p_id_start = 0
else:
tstamps = header_time_data * self.ncs_time_unit - \
self.parameters_global['t_start']
# find data packet to start with signal construction
starts = np.where(tstamps <= t_start)[0]
if len(starts) == 0:
self._diagnostic_print(
'Requested AnalogSignal not present in this time '
'interval.')
return
else:
# first packet to be included into signal
p_id_start = starts[-1]
# find data packet where signal ends (due to gap or t_stop)
stops = np.where(tstamps >= t_stop)[0]
if len(stops) != 0:
first_stop = [stops[0]]
else:
first_stop = []
# last packet to be included in signal
p_id_stop = min(first_stop + [len(data)])
# search gaps in recording in time range to load
gap_packets = [gap_id[0] for gap_id in
self.parameters_ncs[chid]['gaps'] if
gap_id[0] > p_id_start]
if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
p_id_stop = min(gap_packets)
                    warnings.warn(
                        'AnalogSignal was shortened due to a gap in the '
                        'recorded data of file %s at packet id %i' % (
                            filename_ncs, min(gap_packets)))
# search broken packets in time range to load
broken_packets = []
if 'broken_packet' in self.parameters_ncs[chid]:
broken_packets = [packet[0] for packet in
self.parameters_ncs[chid]['broken_packet']
if packet[0] > p_id_start]
if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
p_id_stop = min(broken_packets)
                    warnings.warn(
                        'AnalogSignal was shortened due to a broken data '
                        'packet in the recorded data of file %s at packet '
                        'id %i' % (
                            filename_ncs, min(broken_packets)))
# construct signal in valid packet range
sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
sig = sig.reshape(len(sig) * len(sig[0]))
# ADBitVolts is not guaranteed to be present in the header!
if 'ADBitVolts' in self.parameters_ncs[chid]:
sig *= self.parameters_ncs[chid]['ADBitVolts']
unit = pq.V
else:
                    warnings.warn(
                        'Could not transform data from file %s into a '
                        'physical signal. Missing "ADBitVolts" value in text '
                        'header.' % filename_ncs)
# defining sampling rate for rescaling purposes
sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]
# creating neo AnalogSignal containing data
anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
sampling_rate=1 * sampling_rate,
# rescaling t_start to sampling time units
t_start=(header_time_data[p_id_start] * self.ncs_time_unit
- self.parameters_global['t_start']).rescale(
1 / sampling_rate),
name='channel_%i' % (chid),
channel_index=chid)
# removing protruding parts of first and last data packet
if anasig.t_start < t_start.rescale(anasig.t_start.units):
anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
None)
if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
anasig = anasig.time_slice(None,
t_stop.rescale(anasig.t_start.units))
annotations = copy.deepcopy(self.parameters_ncs[chid])
for pop_key in ['sampling_rate', 't_start']:
if pop_key in annotations:
annotations.pop(pop_key)
anasig.annotations.update(annotations)
anasig.annotations['electrode_id'] = chid
        # this annotation is necessary for automatic generation of
        # recordingchannels
anasig.annotations['channel_index'] = chid
anasig.segment = seg # needed for merge function of analogsignals
seg.analogsignals.append(anasig)
def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
        Reads the associated nev file and attaches its content as an Event to
        the provided neo segment. In contrast to read_ncs, times can not be
        provided as numbers of samples, since a nev file has no inherent
        sampling rate.
Arguments:
filename_nev : Name of the .nev file to be loaded.
seg : Neo Segment, to which the Event containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
Event. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the Events begin.
Default None.
t_stop : time (quantity) that the Event end.
Default None.
Returns:
None
'''
if filename_nev[-4:] != '.nev':
filename_nev += '.nev'
if sep in filename_nev:
filename_nev = filename_nev.split(sep)[-1]
if filename_nev not in self.nev_asso:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nev))
        # ensure meaningful values for requested start and stop times;
        # providing times in samples for a nev file does not make sense as we
        # don't know the underlying sampling rate
        if isinstance(t_start, int):
            raise ValueError(
                'Requesting event information from a nev file in samples '
                'does not make sense. Requested t_start %s' % t_start)
        if isinstance(t_stop, int):
            raise ValueError(
                'Requesting event information from a nev file in samples '
                'does not make sense. Requested t_stop %s' % t_stop)
# ensure meaningful values for requested start and stop times
if t_start is None or t_start < (
self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
t_start = (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start'])
if t_start > (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nev[filename_nev]['t_stop']
                    - self.parameters_global['t_start']),
                   filename_nev))
if t_stop is None or t_stop > (
self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start']):
t_stop = (self.parameters_nev[filename_nev]['t_stop']
- self.parameters_global['t_start'])
if t_stop < (self.parameters_nev[filename_nev]['t_start']
- self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nev[filename_nev]['t_start']
                    - self.parameters_global['t_start']),
                   filename_nev))
        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than or equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nev))
data = self.__mmap_nev_file(filename_nev)
# Extracting all events for one event type and put it into an event
# array
# TODO: Check if this is the correct way of event creation.
for event_type in self.parameters_nev[filename_nev]['event_types']:
# Extract all time stamps of digital markers and rescaling time
type_mask = [i for i in range(len(data)) if
(data[i][4] == event_type['event_id']
and data[i][5] == event_type['nttl']
and data[i][10].decode('latin-1') == event_type[
'name'])]
marker_times = [t[3] for t in
data[type_mask]] * self.nev_time_unit - \
self.parameters_global['t_start']
# only consider Events in the requested time window [t_start,
# t_stop]
time_mask = [i for i in range(len(marker_times)) if (
marker_times[i] >= t_start and marker_times[i] <= t_stop)]
marker_times = marker_times[time_mask]
            # Do not create an Event if there are no events of this type
            # in the requested time range
if len(marker_times) == 0:
continue
ev = Event(times=pq.Quantity(marker_times, units=self.nev_time_unit,
dtype="int"),
labels=event_type['name'],
name="Digital Marker " + str(event_type),
file_origin=filename_nev,
marker_id=event_type['event_id'],
digital_marker=True,
analog_marker=False,
nttl=event_type['nttl'])
seg.events.append(ev)
def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads nse file and attaches content as spike train to provided neo
segment. Times can be provided in samples (integer values). If the
nse file does not contain a sampling rate value, the ncs sampling
rate on the same electrode is used.
Arguments:
filename_nse : Name of the .nse file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
SpikeTrain begins.
Default None.
t_stop : time or sample (quantity or integer) that the SpikeTrain
ends.
Default None.
unit_list : unit ids to be loaded. If [], all units are loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
'''
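        # Usage sketch (hypothetical file name and unit ids; seg is an
        # existing Segment of this session):
        #
        #     NIO.read_nse('SE1.nse', seg, t_start=0 * pq.s, t_stop=1 * pq.s,
        #                  unit_list=[0, 1], waveforms=True)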
if filename_nse[-4:] != '.nse':
filename_nse += '.nse'
if sep in filename_nse:
filename_nse = filename_nse.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_nse)
if channel_id is not None:
chid = channel_id
else:
# if nse file is empty it is not listed in self.parameters_nse, but
# in self.nse_avail
if filename_nse in self.nse_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) nse file (%s). '
'Not loading nse file.' % (filename_nse))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nse))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for nse values.
if 'sampling_rate' in self.parameters_nse[chid]:
sr = self.parameters_nse[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
            raise ValueError(
                'No sampling rate present for channel id %i in nse file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_nse))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
        # + rescaling to the global recording start (first sample in any file type)
# This is not optimal, as there is no way to know how long the
# recording lasted after last spike
if t_start is None or t_start < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_nse[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_nse[chid]['t_last']
- self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nse[chid]['t_last']
                    - self.parameters_global['t_start']),
                   filename_nse))
if t_stop is None:
t_stop = (sys.maxsize) * self.nse_time_unit
if t_stop is None or t_stop > (
self.parameters_nse[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_nse[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_nse[chid]['t_first']
- self.parameters_global[
't_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nse[chid]['t_first']
                    - self.parameters_global['t_start']),
                   filename_nse))
        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than or equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nse))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_nse_packets(filename_nse)
# load all units available if unit_list==[] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
                'None of the requested unit ids (%s) are present '
                'in nse file %s (contains unit ids %s)' % (
unit_list, filename_nse, np.unique(cell_numbers)))
# extracting spikes unit-wise and generate spiketrains
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
unit_mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[unit_mask] * self.nse_time_unit
spike_times = spike_times - self.parameters_global['t_start']
time_mask = np.where(np.logical_and(spike_times >= t_start,
spike_times < t_stop))
spike_times = spike_times[time_mask]
else:
spike_times = pq.Quantity([], units=self.nse_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_nse,
unit_id=unit_i,
channel_id=chid)
if waveforms and not lazy:
# Collect all waveforms of the specific unit
# For computational reasons: no units, no time axis
st.waveforms = data_points[unit_mask][time_mask]
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_nse[chid])
st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
            # recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads ntt file and attaches content as spike train to provided neo
segment.
Arguments:
filename_ntt : Name of the .ntt file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the SpikeTrain begins. Default None.
t_stop : time (quantity) that the SpikeTrain ends. Default None.
unit_list : unit ids to be loaded. If [] or None all units are
loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
'''
if filename_ntt[-4:] != '.ntt':
filename_ntt += '.ntt'
if sep in filename_ntt:
filename_ntt = filename_ntt.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_ntt)
if channel_id is not None:
chid = channel_id
else:
# if ntt file is empty it is not listed in self.parameters_ntt, but
# in self.ntt_avail
if filename_ntt in self.ntt_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) ntt file (%s). '
'Not loading ntt file.' % (filename_ntt))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ntt))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for ntt values.
if 'sampling_rate' in self.parameters_ntt[chid]:
sr = self.parameters_ntt[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
            raise ValueError(
                'No sampling rate present for channel id %i in ntt file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_ntt))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling to global recording start (first sample in any
# recording file)
if t_start is None or t_start < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
t_start = (
self.parameters_ntt[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ntt[chid]['t_last']
                    - self.parameters_global['t_start']),
                   filename_ntt))
if t_stop is None:
t_stop = (sys.maxsize) * self.ntt_time_unit
if t_stop is None or t_stop > (
self.parameters_ntt[chid]['t_last']
- self.parameters_global[
't_start']):
t_stop = (
self.parameters_ntt[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ntt[chid]['t_first']
- self.parameters_global[
't_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ntt[chid]['t_first']
                    - self.parameters_global['t_start']),
                   filename_ntt))
        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than or equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ntt))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_ntt_packets(filename_ntt)
# TODO: When ntt available: Implement 1 RecordingChannelGroup per
# Tetrode, such that each electrode gets its own recording channel
        # load all units available if unit_list == [] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
                'None of the requested unit ids (%s) are present '
                'in ntt file %s (contains unit ids %s)' % (
unit_list, filename_ntt, np.unique(cell_numbers)))
# loading data for each unit and generating spiketrain
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[mask] * self.ntt_time_unit
spike_times = spike_times - self.parameters_global['t_start']
spike_times = spike_times[np.where(
np.logical_and(spike_times >= t_start,
spike_times < t_stop))]
else:
spike_times = pq.Quantity([], units=self.ntt_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_ntt,
unit_id=unit_i,
channel_id=chid)
# Collect all waveforms of the specific unit
if waveforms and not lazy:
# For computational reasons: no units, no time axis
                # transposing to adhere to the neo guideline that time should
                # be in the first axis (somewhat unintuitive for waveforms)
st.waveforms = np.array(
[data_points[t, :, :] for t in range(len(timestamps))
if cell_numbers[t] == unit_i]).transpose()
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
            # update (rather than replace) annotations, so the cached
            # parameter dictionary is not shared and mutated below
            st.annotations.update(self.parameters_ntt[chid])
st.annotations['electrode_id'] = chid
# This annotations is necessary for automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
# private routines
# #################################################
def _associate(self, cachedir=None, usecache='hash'):
"""
        Associates the object with a specified Neuralynx session, i.e., a
        combination of .nse, .nev, .ncs and .ntt files. The metadata are read
        into the object for future reference.
        Arguments:
            cachedir : directory for loading and saving hashes of recording
                sessions and pickled meta information about files extracted
                during the association process
            use_cache: method used for cache identification. Possible values:
                'hash'/'always'/'datesize'/'never'. Default 'hash'
Returns:
-
"""
        # refuse to associate an already associated object
if self.associated:
raise OSError(
"Trying to associate an already associated NeuralynxIO "
"object.")
        # Create parameter containers
        # dictionaries holding the parameters read from the individual file
        # types (keyed by channel id or file name)
        self.parameters_nse = {}
        self.parameters_ncs = {}
        self.parameters_nev = {}
        self.parameters_ntt = {}
        # combined global parameters
        self.parameters_global = {}
# Scanning session directory for recorded files
self.sessionfiles = [f for f in listdir(self.sessiondir) if
isfile(os.path.join(self.sessiondir, f))]
# Listing available files
self.ncs_avail = []
self.nse_avail = []
self.nev_avail = []
self.ntt_avail = []
        # Listing associated (= non-corrupted, non-empty) files
self.ncs_asso = []
self.nse_asso = []
self.nev_asso = []
self.ntt_asso = []
if usecache not in ['hash', 'always', 'datesize', 'never']:
raise ValueError(
"Argument value of usecache '%s' is not valid. Accepted "
"values are 'hash','always','datesize','never'" % usecache)
if cachedir is None and usecache != 'never':
raise ValueError('No cache directory provided.')
        # check whether any of the data files changed -> new data check run;
        # files are never checked if usecache == 'always'
        check_files = usecache != 'always'
if cachedir is not None and usecache != 'never':
self._diagnostic_print(
'Calculating %s of session files to check for cached '
'parameter files.' % usecache)
cachefile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys'
if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[-1]):
os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])
if usecache == 'hash':
hashes_calc = {}
# calculates hash of all available files
for f in self.sessionfiles:
file_hash = self.hashfile(open(self.sessiondir + sep + f,
'rb'), hashlib.sha256())
hashes_calc[f] = file_hash
elif usecache == 'datesize':
hashes_calc = {}
for f in self.sessionfiles:
hashes_calc[f] = self.datesizefile(
self.sessiondir + sep + f)
# load hashes saved for this session in an earlier loading run
if os.path.exists(cachefile):
hashes_read = pickle.load(open(cachefile, 'rb'))
else:
hashes_read = {}
            # compare hashes to previously saved metadata and load metadata
            # if no changes occurred
if usecache == 'always' or all([f in hashes_calc
and f in hashes_read
and hashes_calc[f] == hashes_read[f]
for f in self.sessionfiles]):
check_files = False
self._diagnostic_print(
'Using cached metadata from earlier analysis run in '
'file '
'%s. Skipping file checks.' % cachefile)
# loading saved parameters
parameterfile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache'
if os.path.exists(parameterfile):
parameters_read = pickle.load(open(parameterfile, 'rb'))
else:
raise OSError('Inconsistent cache files.')
for IOdict, dictname in [(self.parameters_global, 'global'),
(self.parameters_ncs, 'ncs'),
(self.parameters_nse, 'nse'),
(self.parameters_nev, 'nev'),
(self.parameters_ntt, 'ntt')]:
IOdict.update(parameters_read[dictname])
self.nev_asso = self.parameters_nev.keys()
self.ncs_asso = [val['filename'] for val in
self.parameters_ncs.values()]
self.nse_asso = [val['filename'] for val in
self.parameters_nse.values()]
self.ntt_asso = [val['filename'] for val in
self.parameters_ntt.values()]
for filename in self.sessionfiles:
# Extracting only continuous signal files (.ncs)
if filename[-4:] == '.ncs':
self.ncs_avail.append(filename)
elif filename[-4:] == '.nse':
self.nse_avail.append(filename)
elif filename[-4:] == '.nev':
self.nev_avail.append(filename)
elif filename[-4:] == '.ntt':
self.ntt_avail.append(filename)
else:
self._diagnostic_print(
'Ignoring file of unknown data type %s' % filename)
if check_files:
self._diagnostic_print('Starting individual file checks.')
# =======================================================================
# # Scan NCS files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ncs file(s).' % (len(self.ncs_avail)))
for ncs_file in self.ncs_avail:
# Loading individual NCS file and extracting parameters
self._diagnostic_print("Scanning " + ncs_file + ".")
# Reading file packet headers
filehandle = self.__mmap_ncs_packet_headers(ncs_file)
if filehandle is None:
continue
try:
# Checking consistency of ncs file
self.__ncs_packet_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ncs_file)
continue
                # Reading data packet header information and storing it in
                # parameters_ncs
self.__read_ncs_data_headers(filehandle, ncs_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(ncs_file)
self.__read_text_header(ncs_file,
self.parameters_ncs[channel_id])
# Check for invalid starting times of data packets in ncs file
self.__ncs_invalid_first_sample_check(filehandle)
# Check ncs file for gaps
self.__ncs_gap_check(filehandle)
self.ncs_asso.append(ncs_file)
# =======================================================================
# # Scan NSE files
# =======================================================================
# Loading individual NSE file and extracting parameters
self._diagnostic_print(
'\nDetected %i .nse file(s).' % (len(self.nse_avail)))
for nse_file in self.nse_avail:
# Loading individual NSE file and extracting parameters
self._diagnostic_print('Scanning ' + nse_file + '.')
# Reading file
filehandle = self.__mmap_nse_packets(nse_file)
if filehandle is None:
continue
try:
# Checking consistency of nse file
self.__nse_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nse_file)
continue
                # Reading header information and storing it in parameters_nse
self.__read_nse_data_header(filehandle, nse_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(nse_file)
self.__read_text_header(nse_file,
self.parameters_nse[channel_id])
# using sampling rate from txt header, as this is not saved
# in data packets
if 'SamplingFrequency' in self.parameters_nse[channel_id]:
self.parameters_nse[channel_id]['sampling_rate'] = \
(self.parameters_nse[channel_id][
'SamplingFrequency'] * self.nse_sr_unit)
self.nse_asso.append(nse_file)
# =======================================================================
# # Scan NEV files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .nev file(s).' % (len(self.nev_avail)))
for nev_file in self.nev_avail:
# Loading individual NEV file and extracting parameters
self._diagnostic_print('Scanning ' + nev_file + '.')
# Reading file
filehandle = self.__mmap_nev_file(nev_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__nev_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nev_file)
continue
                # Reading header information and storing it in parameters_nev
self.__read_nev_data_header(filehandle, nev_file)
# Reading txt file header
self.__read_text_header(nev_file, self.parameters_nev[nev_file])
self.nev_asso.append(nev_file)
# =======================================================================
# # Scan NTT files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ntt file(s).' % (len(self.ntt_avail)))
for ntt_file in self.ntt_avail:
# Loading individual NTT file and extracting parameters
self._diagnostic_print('Scanning ' + ntt_file + '.')
# Reading file
filehandle = self.__mmap_ntt_file(ntt_file)
if filehandle is None:
continue
try:
                    # Checking consistency of ntt file
self.__ntt_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ntt_file)
continue
                # Reading header information and storing it in parameters_ntt
self.__read_ntt_data_header(filehandle, ntt_file)
# Reading txt file header
                self.__read_ntt_text_header(ntt_file)
                # determine the channel id of this ntt file (otherwise the
                # channel id of the previously scanned file would be reused)
                channel_id = self.get_channel_id_by_file_name(ntt_file)
                # using sampling rate from txt header, as this is not saved
                # in data packets
                if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
self.parameters_ntt[channel_id]['sampling_rate'] = \
(self.parameters_ntt[channel_id][
'SamplingFrequency'] * self.ntt_sr_unit)
self.ntt_asso.append(ntt_file)
# =======================================================================
# # Check consistency across files
# =======================================================================
# check RECORDING_OPENED / CLOSED times (from txt header) for
# different files
for parameter_collection in [self.parameters_ncs,
self.parameters_nse,
self.parameters_nev,
self.parameters_ntt]:
                # check recording_opened times for this file type
                if any(np.abs(np.diff([i['recording_opened'] for i in
                                       parameter_collection.values()]))
                       > datetime.timedelta(seconds=1)):
                    raise ValueError(
                        'Files of one type were opened for recording with a '
                        'delay greater than 1 second.')
                # check recording_closed times for this file type
                if any(np.diff([i['recording_closed'] for i in
                                parameter_collection.values()
                                if i['recording_closed'] is not None])
                       > datetime.timedelta(seconds=0.1)):
                    raise ValueError(
                        'Files of one type were closed after recording with '
                        'a delay greater than 0.1 second.')
# get maximal duration of any file in the recording
parameter_collection = list(self.parameters_ncs.values()) + \
list(self.parameters_nse.values()) + \
list(self.parameters_ntt.values()) + \
list(self.parameters_nev.values())
self.parameters_global['recording_opened'] = min(
[i['recording_opened'] for i in parameter_collection])
self.parameters_global['recording_closed'] = max(
[i['recording_closed'] for i in parameter_collection])
# Set up GLOBAL TIMING SCHEME
# #############################
for file_type, parameter_collection in [
('ncs', self.parameters_ncs), ('nse', self.parameters_nse),
('nev', self.parameters_nev), ('ntt', self.parameters_ntt)]:
# check starting times
name_t1, name_t2 = ['t_start', 't_stop'] if (
file_type != 'nse' and file_type != 'ntt') \
else ['t_first', 't_last']
# checking if files of same type start at same time point
if file_type != 'nse' and file_type != 'ntt' \
and len(np.unique(np.array(
[i[name_t1].magnitude for i in
parameter_collection.values()]))) > 1:
raise ValueError(
'%s files do not start at same time point.' %
file_type)
# saving t_start and t_stop for each file type available
if len([i[name_t1] for i in parameter_collection.values()]):
self.parameters_global['%s_t_start' % file_type] = min(
[i[name_t1]
for i in parameter_collection.values()])
self.parameters_global['%s_t_stop' % file_type] = min(
[i[name_t2]
for i in parameter_collection.values()])
            # extracting minimal t_start and maximal t_stop value for this
# recording session
self.parameters_global['t_start'] = min(
[self.parameters_global['%s_t_start' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
self.parameters_global['t_stop'] = max(
[self.parameters_global['%s_t_stop' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
# checking gap consistency across ncs files
# check number of gaps detected
if len(np.unique([len(i['gaps']) for i in
self.parameters_ncs.values()])) != 1:
raise ValueError('NCS files contain different numbers of gaps!')
# check consistency of gaps across files and create global gap
# collection
self.parameters_global['gaps'] = []
for g in range(len(list(self.parameters_ncs.values())[0]['gaps'])):
integrated = False
gap_stats = np.unique(
[i['gaps'][g] for i in self.parameters_ncs.values()],
return_counts=True)
if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
raise ValueError(
'Gap number %i is not consistent across NCS '
'files.' % (
g))
else:
# check if this is second part of already existing gap
for gg in range(len(self.parameters_global['gaps'])):
globalgap = self.parameters_global['gaps'][gg]
# check if stop time of first is start time of second
# -> continuous gap
if globalgap[2] == \
list(self.parameters_ncs.values())[0]['gaps'][
g][1]:
self.parameters_global['gaps'][gg] = \
self.parameters_global['gaps'][gg][:2] + (
list(self.parameters_ncs.values())[0][
'gaps'][g][
2],)
integrated = True
break
if not integrated:
# add as new gap if this is not a continuation of
# existing global gap
self.parameters_global['gaps'].append(
list(self.parameters_ncs.values())[0][
'gaps'][g])
# save results of association for future analysis together with hash
# values for change tracking
if cachedir is not None and usecache != 'never':
pickle.dump({'global': self.parameters_global,
'ncs': self.parameters_ncs,
'nev': self.parameters_nev,
'nse': self.parameters_nse,
'ntt': self.parameters_ntt},
open(cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache', 'wb'))
if usecache != 'always':
pickle.dump(hashes_calc, open(
cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys', 'wb'))
self.associated = True
# private routines
    # #########################################################
# Memory Mapping Methods
def __mmap_nse_packets(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
shape=((filesize - 16384) // 2 // 56, 56),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] \
+ data[:, 1] * 2 ** 16 \
+ data[:, 2] * 2 ** 32 \
+ data[:, 3] * 2 ** 48
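            # e.g. the little-endian u2 words [0x5678, 0x1234, 0, 0]
            # reconstruct to 0x5678 + 0x1234 * 2 ** 16 = 0x12345678 us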
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
data_points = data[:, 24:56].astype('i2')
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
def __mmap_ncs_data(self, filename):
""" Memory map of the Neuralynx .ncs file optimized for data
extraction"""
if getsize(self.sessiondir + sep + filename) > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype=np.dtype(('i2', (522))), mode='r',
offset=16384)
            # drop the 10 header words (20 bytes) of each data packet and
            # return only the 512 samples per packet
return data[:, 10:]
else:
return None
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=((filesize - 16384) // 4 // 261, 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + (data[:,1] *2**32)
header_u4 = data[:, 2:5]
return timestamps, header_u4
else:
return None
def __mmap_ncs_packet_timestamps(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + data[:,1]*2**32
return timestamps
else:
return None
def __mmap_nev_file(self, filename):
""" Memory map the Neuralynx .nev file """
nev_dtype = np.dtype([
('reserved', '<i2'),
('system_id', '<i2'),
('data_size', '<i2'),
('timestamp', '<u8'),
('event_id', '<i2'),
('ttl_input', '<i2'),
('crc_check', '<i2'),
('dummy1', '<i2'),
('dummy2', '<i2'),
('extra', '<i4', (8,)),
('event_string', 'a128'),
])
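        # one event record as declared above is 184 bytes
        # (8 * 2 + 8 + 8 * 4 + 128), matching the Neuralynx .nev record size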
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nev_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_file(self, filename):
""" Memory map the Neuralynx .nse file """
nse_dtype = np.dtype([
('timestamp', '<u8'),
('sc_number', '<u4'),
('cell_number', '<u4'),
('params', '<u4', (8,)),
('data', '<i2', (32, 4)),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nse_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_packets(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
                             shape=((filesize - 16384) // 2 // 152, 152),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
            # reshape to (n_spikes, 32 samples, 4 channels) to match the
            # record layout declared in __mmap_ntt_file
            data_points = data[:, 24:152].astype('i2').reshape((-1, 32, 4))
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
# ___________________________ header extraction __________________________
def __read_text_header(self, filename, parameter_dict):
# Reading main file header (plain text, 16kB)
text_header = codecs.open(self.sessiondir + sep + filename, 'r',
'latin-1').read(16384)
parameter_dict['cheetah_version'] = \
self.__get_cheetah_version_from_txt_header(text_header, filename)
parameter_dict.update(self.__get_filename_and_times_from_txt_header(
text_header, parameter_dict['cheetah_version']))
# separating lines of header and ignoring last line (fill), check if
# Linux or Windows OS
if sep == '/':
text_header = text_header.split('\r\n')[:-1]
if sep == '\\':
text_header = text_header.split('\n')[:-1]
# minor parameters possibly saved in header (for any file type)
minor_keys = ['AcqEntName',
'FileType',
'FileVersion',
'RecordSize',
'HardwareSubSystemName',
'HardwareSubSystemType',
'SamplingFrequency',
'ADMaxValue',
'ADBitVolts',
'NumADChannels',
'ADChannel',
'InputRange',
'InputInverted',
'DSPLowCutFilterEnabled',
'DspLowCutFrequency',
'DspLowCutNumTaps',
'DspLowCutFilterType',
'DSPHighCutFilterEnabled',
'DspHighCutFrequency',
'DspHighCutNumTaps',
'DspHighCutFilterType',
'DspDelayCompensation',
'DspFilterDelay_\xb5s',
'DisabledSubChannels',
'WaveformLength',
'AlignmentPt',
'ThreshVal',
'MinRetriggerSamples',
'SpikeRetriggerTime',
'DualThresholding',
'Feature Peak 0',
'Feature Valley 1',
'Feature Energy 2',
'Feature Height 3',
'Feature NthSample 4',
'Feature NthSample 5',
'Feature NthSample 6',
'Feature NthSample 7',
'SessionUUID',
'FileUUID',
'CheetahRev',
'ProbeName',
'OriginalFileName',
'TimeCreated',
'TimeClosed',
'ApplicationName',
'AcquisitionSystem',
'ReferenceChannel']
# extracting minor key values of header (only taking into account
# non-empty lines)
for i, minor_entry in enumerate(text_header):
if minor_entry == '' or minor_entry[0] == '#':
continue
matching_key = [key for key in minor_keys if
minor_entry.strip('-').startswith(key)]
if len(matching_key) == 1:
matching_key = matching_key[0]
minor_value = minor_entry.split(matching_key)[1].strip(
' ').rstrip(' ')
# determine data type of entry
if minor_value.isdigit():
# converting to int if possible
minor_value = int(minor_value)
else:
# converting to float if possible
try:
minor_value = float(minor_value)
except:
pass
if matching_key in parameter_dict:
warnings.warn(
'Multiple entries for {} in text header of {}'.format(
matching_key, filename))
else:
parameter_dict[matching_key] = minor_value
elif len(matching_key) > 1:
raise ValueError(
'Inconsistent minor key list for text header '
'interpretation.')
else:
warnings.warn(
'Skipping text header entry %s, because it is not in '
'minor key list' % minor_entry)
self._diagnostic_print(
'Successfully decoded text header of file (%s).' % filename)
def __get_cheetah_version_from_txt_header(self, text_header, filename):
version_regex = re.compile(r'((-CheetahRev )|'
r'(ApplicationName Cheetah "))'
r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
match = version_regex.search(text_header)
if match:
return match.groupdict()['version']
else:
raise ValueError('Can not extract Cheetah version from file '
'header of file %s' % filename)
def __get_filename_and_times_from_txt_header(self, text_header, version):
if parse_version(version) <= parse_version('5.6.4'):
datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): '
r'(?P<date>\S+)'
r' \(h:m:s\.ms\) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'## File Name (?P<filename>\S+)')
datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
else:
datetime1_regex = re.compile(r'-TimeCreated '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
datetime2_regex = re.compile(r'-TimeClosed '
r'(?P<date>\S+) '
r'(?P<time>\S+)')
filename_regex = re.compile(r'-OriginalFileName '
r'"?(?P<filename>\S+)"?')
datetimeformat = '%Y/%m/%d %H:%M:%S'
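        # illustrative header lines matched by the regular expressions above
        # (values are made up):
        #     -TimeCreated 2014/07/24 10:31:02
        #     -TimeClosed 2014/07/24 11:02:35
        #     -OriginalFileName CSC1.ncs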
matchtime1 = datetime1_regex.search(text_header).groupdict()
matchtime2 = datetime2_regex.search(text_header).groupdict()
matchfilename = filename_regex.search(text_header)
filename = matchfilename.groupdict()['filename']
if '## Time Closed File was not closed properly' in text_header:
warnings.warn('Text header of file %s does not contain recording '
'closed time. File was not closed properly.'
'' % filename)
datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' '
+ matchtime1['time'],
datetimeformat)
datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' '
+ matchtime2['time'],
datetimeformat)
output = {'recording_opened': datetime1,
'recording_closed': datetime2,
'file_created': datetime1,
'file_closed': datetime2,
'recording_file_name': filename}
return output
def __read_ncs_data_headers(self, filehandle, filename):
'''
Reads the .ncs data block headers and stores the information in the
object's parameters_ncs dictionary.
Args:
filehandle (file object):
Handle to the already opened .ncs file.
filename (string):
Name of the ncs file.
Returns:
dict of extracted data
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0][0]
sr = header_u4[0][1] # in Hz
t_start = timestamps[0] # in microseconds
        # calculating the time stamp of the first sample that was no longer
        # recorded, i.e. the end of the last packet:
        # t_stop = time of first sample in last packet + (#samples in last
        # packet * conversion factor / sampling rate)
        # the conversion factor is needed as times are recorded in microseconds
t_stop = timestamps[-1] + (
(header_u4[-1][2]) * (
1 / self.ncs_time_unit.rescale(pq.s)).magnitude
/ header_u4[-1][1])
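        # e.g. with a full last packet of 512 samples at 32000 Hz the offset
        # is 512 * 1e6 / 32000 = 16000 us after the last packet's timestamp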
if channel_id in self.parameters_ncs:
raise ValueError(
'Detected multiple ncs files for channel_id %i.'
% channel_id)
else:
sampling_unit = [pq.CompoundUnit('%f*%s'
'' % (sr,
self.ncs_sr_unit.symbol))]
sampling_rate = sr * self.ncs_sr_unit
self.parameters_ncs[channel_id] = {'filename': filename,
't_start': t_start
* self.ncs_time_unit,
't_stop': t_stop
* self.ncs_time_unit,
'sampling_rate': sampling_rate,
'sampling_unit': sampling_unit,
'gaps': []}
return {channel_id: self.parameters_ncs[channel_id]}
def __read_nse_data_header(self, filehandle, filename):
'''
        Reads the .nse data block headers and stores the information in the
        object's parameters_nse dictionary.
Args:
filehandle (file object):
Handle to the already opened .nse file.
filename (string):
Name of the nse file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
self.parameters_nse[channel_id] = {'filename': filename,
't_first': t_first
* self.nse_time_unit,
't_last': t_last
* self.nse_time_unit,
'cell_count': cell_count}
def __read_ntt_data_header(self, filehandle, filename):
'''
        Reads the .ntt data block headers and stores the information in the
        object's parameters_ntt dictionary.
        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
            filename (string):
                Name of the ntt file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
# spike_parameters = filehandle[0][3]
# else:
# t_first = None
# channel_id = None
# cell_count = 0
# # spike_parameters = None
#
# self._diagnostic_print('Empty file: No information
# contained in %s'%filename)
        self.parameters_ntt[channel_id] = {'filename': filename,
                                           't_first': t_first
                                                      * self.ntt_time_unit,
                                           't_last': t_last
                                                     * self.ntt_time_unit,
                                           'cell_count': cell_count}
def __read_nev_data_header(self, filehandle, filename):
'''
Reads the .nev data block headers and stores the relevant information
in the
object's parameters_nev dictionary.
Args:
filehandle (file object):
Handle to the already opened .nev file.
filename (string):
Name of the nev file.
Returns:
-
'''
# Extracting basic recording events to be able to check recording
# consistency
if filename in self.parameters_nev:
raise ValueError(
'Detected multiple nev files of name %s.' % (filename))
else:
self.parameters_nev[filename] = {}
if 'Starting_Recording' in self.parameters_nev[filename]:
raise ValueError('Trying to read second nev file of name %s. '
' Only one can be handled.' % filename)
self.parameters_nev[filename]['Starting_Recording'] = []
self.parameters_nev[filename]['events'] = []
for event in filehandle:
# separately extracting 'Starting Recording'
if ((event[4] in [11, 19])
and (event[10].decode('latin-1') == 'Starting Recording')):
self.parameters_nev[filename]['Starting_Recording'].append(
event[3] * self.nev_time_unit)
# adding all events to parameter collection
self.parameters_nev[filename]['events'].append(
{'timestamp': event[3] * self.nev_time_unit,
'event_id': event[4],
'nttl': event[5],
'name': event[10].decode('latin-1')})
if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
raise ValueError(
'No Event "Starting_Recording" detected in %s' % (
filename))
self.parameters_nev[filename]['t_start'] = min(
self.parameters_nev[filename]['Starting_Recording'])
# t_stop = time stamp of last event in file
self.parameters_nev[filename]['t_stop'] = max(
[e['timestamp'] for e in
self.parameters_nev[filename]['events']])
# extract all occurring event types (= combination of nttl,
# event_id and name/string)
event_types = copy.deepcopy(self.parameters_nev[filename]['events'])
for d in event_types:
d.pop('timestamp')
self.parameters_nev[filename]['event_types'] = [dict(y) for y in
{tuple(
x.items())
for x in
event_types}]
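        # The set-of-tuples round trip above simply de-duplicates events that
        # share the same (event_id, nttl, name) combination, e.g. repeated
        # 'Starting Recording' events collapse into a single event type entry.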
# ________________ File Checks __________________________________
def __ncs_packet_check(self, filehandle):
'''
Checks consistency of data in ncs file and raises assertion error if a
check fails. Detected recording gaps are added to parameter_ncs
Args:
filehandle (file object):
Handle to the already opened .ncs file.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
# checking sampling rate of data packets
sr0 = header_u4[0, 1]
assert all(header_u4[:, 1] == sr0)
# checking channel id of data packets
channel_id = header_u4[0, 0]
assert all(header_u4[:, 0] == channel_id)
# time offset of data packets
# TODO: Check if there is a safer way to do the delta_t check for ncs
# data packets
        # this is not a safe assumption: it relies on the first two data
        # packets having correct time stamps
delta_t = timestamps[1] - timestamps[0]
# valid samples of first data packet
temp_valid_samples = header_u4[0, 2]
# unit test
# time difference between packets corresponds to number of recorded
# samples
assert delta_t == (
temp_valid_samples / (
self.ncs_time_unit.rescale(pq.s).magnitude * sr0))
self._diagnostic_print('NCS packet check successful.')
def __nse_check(self, filehandle):
'''
Checks consistency of data in ncs file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nse file.
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NSE file check successful.')
def __nev_check(self, filehandle):
'''
Checks consistency of data in nev file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nev file.
'''
        # this entry should always equal 2 (see Neuralynx File Description),
        # but in practice it is sometimes 0, so both values are accepted below.
assert all([f[2] == 2 or f[2] == 0 for f in filehandle])
# TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be
# non-zero. Interpretation? Include in event extraction.
# only observed 0 for index 0,1,2,6,7,8,9 in nev files.
# If they are non-zero, this needs to be included in event extraction
assert all([f[0] == 0 for f in filehandle])
assert all([f[1] == 0 for f in filehandle])
assert all([f[2] in [0, 2] for f in filehandle])
assert all([f[6] == 0 for f in filehandle])
assert all([f[7] == 0 for f in filehandle])
assert all([f[8] == 0 for f in filehandle])
assert all([all(f[9] == 0) for f in filehandle])
self._diagnostic_print('NEV file check successful.')
def __ntt_check(self, filehandle):
'''
Checks consistency of data in ncs file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nse file.
'''
# TODO: check this when first .ntt files are available
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NTT file check successful.')
def __ncs_gap_check(self, filehandle):
'''
Checks individual data blocks of ncs files for consistent starting
times with respect to sample count.
This covers intended recording gaps as well as shortened data packet,
which are incomplete
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
if channel_id not in self.parameters_ncs:
self.parameters_ncs[channel_id] = {}
# time stamps of data packets
delta_t = timestamps[1] - timestamps[0] # in microsec
data_packet_offsets = np.diff(timestamps) # in microsec
# check if delta_t corresponds to number of valid samples present in
# data packets
# NOTE: This also detects recording gaps!
valid_samples = header_u4[:-1, 2]
sampling_rate = header_u4[0, 1]
packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
pq.s).magnitude * sampling_rate)) == data_packet_offsets
if not all(packet_checks):
if 'broken_packets' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['broken_packets'] = []
            # 'is False' compares object identity and always yields an empty
            # result; element-wise negation is what is intended here
            broken_packets = np.where(np.logical_not(packet_checks))[0]
for broken_packet in broken_packets:
self.parameters_ncs[channel_id]['broken_packets'].append(
(broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet]))
self._diagnostic_print('Detected broken packet in NCS file at '
'packet id %i (sample number %i '
'time offset id %i)'
'' % (broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet])
) # in microsec
# checking for irregular data packet durations -> gaps / shortened
# data packets
if not all(data_packet_offsets == delta_t):
if 'gaps' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['gaps'] = []
# gap identification by (sample of gap start, duration)
# gap packets
gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
for gap_packet_id in gap_packet_ids:
# skip if this packet starting time is known to be corrupted
# hoping no corruption and gap occurs simultaneously
# corrupted time stamp affects two delta_t comparisons:
if gap_packet_id in self.parameters_ncs[channel_id][
'invalid_first_samples'] \
or gap_packet_id + 1 in self.parameters_ncs[channel_id][
'invalid_first_samples']:
continue
gap_start = timestamps[
gap_packet_id] # t_start of last packet [microsec]
gap_stop = timestamps[
gap_packet_id + 1] # t_stop of first packet [microsec]
self.parameters_ncs[channel_id]['gaps'].append((gap_packet_id,
gap_start,
gap_stop)) #
# [,microsec,microsec]
self._diagnostic_print('Detected gap in NCS file between'
'sample time %i and %i (last correct '
'packet id %i)' % (gap_start, gap_stop,
gap_packet_id))
def __ncs_invalid_first_sample_check(self, filehandle):
'''
Checks data blocks of ncs files for corrupted starting times indicating
a missing first sample in the data packet. These are then excluded from
the gap check, but ignored for further analysis.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
self.parameters_ncs[channel_id]['invalid_first_samples'] = []
# checking if first bit of timestamp is 1, which indicates error
invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
if len(invalid_packet_ids) > 0:
            warnings.warn('Invalid first sample(s) detected in ncs file '
                          '(packet id(s) %s)! This error is ignored in '
                          'subsequent routines.' % (invalid_packet_ids))
self.parameters_ncs[channel_id][
'invalid_first_samples'] = invalid_packet_ids
# checking consistency of data around corrupted packet time
for invalid_packet_id in invalid_packet_ids:
if invalid_packet_id < 2 or invalid_packet_id > len(
filehandle) - 2:
raise ValueError(
'Corrupted ncs data packet at the beginning'
'or end of file.')
elif (timestamps[invalid_packet_id + 1] - timestamps[
invalid_packet_id - 1] != 2 * (
timestamps[invalid_packet_id - 1] - timestamps[
invalid_packet_id - 2])):
raise ValueError('Starting times of ncs data packets around'
'corrupted data packet are not '
'consistent!')
    # Supplementary Functions
def get_channel_id_by_file_name(self, filename):
"""
        Checks the parameters of NCS, NSE and NTT files for the given filename
        and returns the channel_id if the result is consistent
:param filename:
:return:
"""
channel_ids = []
channel_ids += [k for k in self.parameters_ncs if
self.parameters_ncs[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_nse if
self.parameters_nse[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_ntt if
self.parameters_ntt[k]['filename'] == filename]
if len(np.unique(np.asarray(channel_ids))) == 1:
return channel_ids[0]
elif len(channel_ids) > 1:
raise ValueError(
'Ambiguous channel ids detected. Filename %s is associated'
' to different channels of NCS and NSE and NTT %s'
'' % (filename, channel_ids))
else: # if filename was not detected
return None
def hashfile(self, afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
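    # Illustrative usage (not called in this snippet; the file name is
    # hypothetical):
    #     import hashlib
    #     with open('data.ncs', 'rb') as f:
    #         digest = self.hashfile(f, hashlib.sha256())
    # Reading in `blocksize` chunks keeps memory usage constant for large files.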
def datesizefile(self, filename):
return str(os.path.getmtime(filename)) + '_' + str(
os.path.getsize(filename))
def _diagnostic_print(self, text):
'''
Print a diagnostic message.
Args:
text (string):
Diagnostic text to print.
Returns:
-
'''
if self._print_diagnostic:
print('NeuralynxIO: ' + text)
| bsd-3-clause | 3,845,068,956,470,738,400 | 42.778378 | 97 | 0.497811 | false |
captainpete/rethinkdb | external/v8_3.30.33.16/build/landmines.py | 49 | 4879 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as a hook. If it detects that the build should
be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
various build scripts will then check for the presence of this file and clobber
accordingly. The script will also emit the reasons for the clobber to stdout.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import logging
import optparse
import os
import sys
import subprocess
import time
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_target_build_dir(build_tool, target):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out\Release'
'/mnt/data/b/build/slave/linux/build/src/out/Debug'
'/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild', target)
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
ret = os.path.join(SRC_DIR, 'out', target)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build', target)
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def set_up_landmines(target, new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_target_build_dir(landmine_utils.builder(), target)
landmines_path = os.path.join(out_dir, '.landmines')
if not os.path.exists(out_dir):
return
if not os.path.exists(landmines_path):
print "Landmines tracker didn't exists."
# FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
# to know if we are right after a clobber or if it is first-time landmines
# deployment. Also, a landmine-triggered clobber right after a clobber is
# not possible. Different clobber methods for msvs, xcode and make all
# have different blacklists of files that are not deleted.
if os.path.exists(landmines_path):
triggered = os.path.join(out_dir, '.landmines_triggered')
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
with open(triggered, 'w') as f:
f.writelines(diff)
print "Setting landmine: %s" % triggered
elif os.path.exists(triggered):
# Remove false triggered landmines.
os.remove(triggered)
print "Removing landmine: %s" % triggered
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns a list of landmine emitting scripts."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
return options.landmine_scripts + [extra_script]
else:
return options.landmine_scripts
def main():
landmine_scripts = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
for target in ('Debug', 'Release'):
set_up_landmines(target, landmines)
return 0
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | 3,808,867,847,769,036,000 | 34.100719 | 79 | 0.695429 | false |
dhutchis/accumulo | test/system/bench/cloudstone5/cloudstone5.py | 7 | 1070 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from lib import cloudshell
from lib.TableSplitsBenchmark import TableSplitsBenchmark
class CloudStone5(TableSplitsBenchmark):
"Creates a table with many splits"
def suite():
result = unittest.TestSuite([
CloudStone5(),
])
return result
| apache-2.0 | -8,368,251,239,055,802,000 | 35.896552 | 74 | 0.760748 | false |
blamedcloud/AISuite | weight_heuristic.py | 2 | 2291 | #!/usr/bin/env python
#weight_heuristic.py
import random
from alphabeta import UPPER_BOUND
from alphabeta import LOWER_BOUND
class WeightHeuristic(object):
def __init__(self, weight_m):
self.weights = weight_m
self.wins = 0
self.losses = 0
def __call__(self, game_state):
value = 0
state = self.parse(game_state)
winner = state[0]
turn = state[1]
matrix = state[2]
#check if the game is over
if winner == 1:
return UPPER_BOUND
elif winner == 2:
return LOWER_BOUND
elif winner == 0:
return 0
#evaluate based on weights
for y in range(len(matrix)):
for x in range(len(matrix[y])):
token = matrix[y][x]
value += self.weights[token][y][x]
#respect the bounds
if value >= UPPER_BOUND:
value = UPPER_BOUND-1
elif value <= LOWER_BOUND:
value = LOWER_BOUND+1
return value
def get_weights(self):
return self.weights
#method to parse the game_state into a tuple
#containing (winner, turn, matrix)
#parse : Game_State -> (Int, Int, List)
def parse(self, game_state):
pass
def record_game(self, win = False): # this counts draws as losses, which should be fine since it is across the board.
if win:
self.wins += 1
else:
self.losses += 1
def get_fitness(self):
if self.wins + self.losses == 0:
return 0
else:
return float(self.wins)/float(self.wins + self.losses)
def reproduce(self, other, mutation_rate = .001):
child_w = {}
ow = other.get_weights()
for token in self.weights:
matrix = []
for y in range(len(self.weights[token])):
row = []
for x in range(len(self.weights[token][y])):
new_value = 0
if random.random() < mutation_rate: # mutation occured
new_value = random.randint(LOWER_BOUND,UPPER_BOUND)
else:
my_w = self.weights[token][y][x]
other_w = ow[token][y][x]
if my_w*other_w < 0: # they have opposite signs.
new_value = random.choice([my_w,other_w])
elif my_w*other_w > 0: # they have the same sign.
new_value = (my_w + other_w)/2
else: # at least one is zero.
if my_w != 0:
new_value = my_w
else:
new_value = other_w
row += [new_value]
matrix += [row]
child_w[token] = matrix
return self.__class__(child_w)
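# A minimal sketch of how a subclass and a genetic-algorithm loop could use
# this class (the game, tokens and weight layout below are assumptions, not
# part of this module):
#
#     class TicTacToeWH(WeightHeuristic):
#         def parse(self, game_state):
#             # must return (winner, turn, matrix) for the board in question
#             return game_state.winner, game_state.turn, game_state.board
#
#     population = [TicTacToeWH(random_weights()) for _ in range(20)]
#     # ... play games, calling record_game(win=...) on each heuristic ...
#     parents = sorted(population, key=lambda wh: wh.get_fitness())[-2:]
#     child = parents[0].reproduce(parents[1], mutation_rate=0.01)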
| mit | -8,009,684,653,297,383,000 | 23.634409 | 118 | 0.615452 | false |
bkirui/odoo | addons/account/company.py | 384 | 2814 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
'tax_calculation_rounding_method': fields.selection([
('round_per_line', 'Round per Line'),
('round_globally', 'Round Globally'),
], 'Tax Calculation Rounding Method',
help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
'overdue_msg': fields.text('Overdue Payments Message', translate=True),
}
_defaults = {
'expects_chart_of_accounts': True,
'tax_calculation_rounding_method': 'round_per_line',
'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, Please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,820,190,051,052,536,000 | 54.176471 | 610 | 0.673063 | false |
jaapz/werkzeug | examples/couchy/views.py | 44 | 1999 | from werkzeug.utils import redirect
from werkzeug.exceptions import NotFound
from couchy.utils import render_template, expose, \
validate_url, url_for, Pagination
from couchy.models import URL
@expose('/')
def new(request):
error = url = ''
if request.method == 'POST':
url = request.form.get('url')
alias = request.form.get('alias')
if not validate_url(url):
error = "I'm sorry but you cannot shorten this URL."
elif alias:
if len(alias) > 140:
error = 'Your alias is too long'
elif '/' in alias:
error = 'Your alias might not include a slash'
elif URL.load(alias):
error = 'The alias you have requested exists already'
if not error:
url = URL(target=url, public='private' not in request.form, shorty_id=alias if alias else None)
url.store()
uid = url.id
return redirect(url_for('display', uid=uid))
return render_template('new.html', error=error, url=url)
@expose('/display/<uid>')
def display(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return render_template('display.html', url=url)
@expose('/u/<uid>')
def link(request, uid):
url = URL.load(uid)
if not url:
raise NotFound()
return redirect(url.target, 301)
@expose('/list/', defaults={'page': 1})
@expose('/list/<int:page>')
def list(request, page):
def wrap(doc):
data = doc.value
data['_id'] = doc.id
return URL.wrap(data)
code = '''function(doc) { if (doc.public){ map([doc._id], doc); }}'''
docResults = URL.query(code)
results = [wrap(doc) for doc in docResults]
pagination = Pagination(results, 1, page, 'list')
if pagination.page > 1 and not pagination.entries:
raise NotFound()
return render_template('list.html', pagination=pagination)
def not_found(request):
return render_template('not_found.html')
| bsd-3-clause | 8,268,488,097,135,613,000 | 31.770492 | 107 | 0.605303 | false |
nesi/easybuild-framework | easybuild/framework/easyconfig/constants.py | 6 | 2350 | #
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
Easyconfig constants module that provides all constants that can
be used within an Easyconfig file.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import platform
from vsc.utils import fancylogger
from easybuild.tools.systemtools import get_shared_lib_ext, get_os_name, get_os_type, get_os_version
_log = fancylogger.getLogger('easyconfig.constants', fname=False)
EXTERNAL_MODULE_MARKER = 'EXTERNAL_MODULE'
# constants that can be used in easyconfig
EASYCONFIG_CONSTANTS = {
'EXTERNAL_MODULE': (EXTERNAL_MODULE_MARKER, "External module marker"),
'SYS_PYTHON_VERSION': (platform.python_version(), "System Python version (platform.python_version())"),
'OS_TYPE': (get_os_type(), "System type (e.g. 'Linux' or 'Darwin')"),
'OS_NAME': (get_os_name(), "System name (e.g. 'fedora' or 'RHEL')"),
'OS_VERSION': (get_os_version(), "System version"),
}
def constant_documentation():
"""Generate the easyconfig constant documentation"""
indent_l0 = " " * 2
indent_l1 = indent_l0 + " " * 2
doc = []
doc.append("Constants that can be used in easyconfigs")
for cst, (val, descr) in EASYCONFIG_CONSTANTS.items():
doc.append('%s%s: %s (%s)' % (indent_l1, cst, val, descr))
return "\n".join(doc)
| gpl-2.0 | -7,266,035,335,851,191,000 | 35.71875 | 107 | 0.714894 | false |
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/File/WholeFileWriter.py | 9 | 2403 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
=======================
Whole File Writer
=======================
This component accepts file creation jobs and signals the completion of each
job. Creation jobs consist of a list [ filename, contents ] added to "inbox".
Completion signals consist of the filename being sent to "outbox".
All jobs are processed sequentially.
This component does not terminate.
"""
from Axon.Component import component
class WholeFileWriter(component):
"""\
WholeFileWriter() -> component that creates and writes files
Uses [ filename, contents ] structure to file creation messages in "inbox"
"""
Inboxes = {
"inbox" : "file creation jobs",
"control" : "UNUSED"
}
Outboxes = {
"outbox" : "filename written",
"signal" : "UNUSED"
}
def __init__(self):
super(WholeFileWriter, self).__init__()
def writeFile(self, filename, data):
"""Writes the data to a new file"""
file = open(filename, "wb", 0)
        file.write(data)
file.close()
def main(self):
"""Main loop"""
while 1:
yield 1
if self.dataReady("inbox"):
command = self.recv("inbox")
self.writeFile(command[0], command[1])
self.send(command[0], "outbox")
else:
self.pause()
__kamaelia_components__ = ( WholeFileWriter, )
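# Illustrative usage sketch (everything other than WholeFileWriter below is an
# assumption about the wider Kamaelia library, not defined in this file):
#
#     from Kamaelia.Chassis.Pipeline import Pipeline
#     Pipeline(
#         SomeSourceProducingFilenameContentsPairs(),
#         WholeFileWriter(),
#     ).run()
#
# Each [filename, contents] message received on "inbox" is written out, and the
# filename is echoed on "outbox" once the write has completed.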
| apache-2.0 | 7,747,922,741,827,870,000 | 30.618421 | 78 | 0.611319 | false |
polojacky/ehfpi | ehf/rest_framework/tests/test_nullable_fields.py | 16 | 1069 | from django.core.urlresolvers import reverse
from rest_framework.compat import patterns, url
from rest_framework.test import APITestCase
from rest_framework.tests.models import NullableForeignKeySource
from rest_framework.tests.serializers import NullableFKSourceSerializer
from rest_framework.tests.views import NullableFKSourceDetail
urlpatterns = patterns(
'',
url(r'^objects/(?P<pk>\d+)/$', NullableFKSourceDetail.as_view(), name='object-detail'),
)
class NullableForeignKeyTests(APITestCase):
"""
DRF should be able to handle nullable foreign keys when a test
Client POST/PUT request is made with its own serialized object.
"""
urls = 'rest_framework.tests.test_nullable_fields'
def test_updating_object_with_null_fk(self):
obj = NullableForeignKeySource(name='example', target=None)
obj.save()
serialized_data = NullableFKSourceSerializer(obj).data
response = self.client.put(reverse('object-detail', args=[obj.pk]), serialized_data)
self.assertEqual(response.data, serialized_data)
| apache-2.0 | 1,062,316,351,393,000,200 | 34.633333 | 92 | 0.744621 | false |
tsdmgz/ansible | lib/ansible/utils/module_docs_fragments/aci.py | 36 | 2382 | # -*- coding: utf-8 -*-
# Copyright 2017 Dag Wieers <[email protected]>
# Copyright 2017 Swetha Chunduri (@schunduri)
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = '''
options:
hostname:
description:
- IP Address or hostname of APIC resolvable by Ansible control host.
required: yes
aliases: [ host ]
username:
description:
- The username to use for authentication.
required: yes
default: admin
aliases: [ user ]
password:
description:
- The password to use for authentication.
required: yes
timeout:
description:
- The socket level timeout in seconds.
default: 30
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
default: 'yes'
type: bool
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
notes:
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see `setting the environment
<http://docs.ansible.com/playbooks_environment.html>`_),
or by using the C(use_proxy) option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
'''
| gpl-3.0 | 4,123,078,530,955,032,000 | 33.028571 | 113 | 0.716205 | false |
aabbox/kbengine | kbe/src/lib/python/Lib/test/test_asyncio/test_windows_events.py | 60 | 4592 | import os
import sys
import unittest
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _winapi
import asyncio
from asyncio import _overlapped
from asyncio import test_utils
from asyncio import windows_events
class UpperProto(asyncio.Protocol):
def __init__(self):
self.buf = []
def connection_made(self, trans):
self.trans = trans
def data_received(self, data):
self.buf.append(data)
if b'\n' in data:
self.trans.write(b''.join(self.buf).upper())
self.trans.close()
class ProactorTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
def test_close(self):
a, b = self.loop._socketpair()
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
f = asyncio.async(self.loop.sock_recv(b, 100))
trans.close()
self.loop.run_until_complete(f)
self.assertEqual(f.result(), b'')
b.close()
def test_double_bind(self):
ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
server1 = windows_events.PipeServer(ADDRESS)
with self.assertRaises(PermissionError):
windows_events.PipeServer(ADDRESS)
server1.close()
def test_pipe(self):
res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done')
def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = yield from self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader)
trans, proto = yield from self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = yield from r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
yield from self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done'
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with 0.5s timeout;
# result should be False at timeout
fut = self.loop._proactor.wait_for_handle(event, 0.5)
start = self.loop.time()
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertFalse(fut.result())
self.assertTrue(0.48 < elapsed < 0.9, elapsed)
_overlapped.SetEvent(event)
        # Wait for set event;
# result should be True immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
start = self.loop.time()
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(fut.result())
self.assertTrue(0 <= elapsed < 0.3, elapsed)
# Tulip issue #195: cancelling a done _WaitHandleFuture must not crash
fut.cancel()
def test_wait_for_handle_cancel(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with a cancelled future;
# CancelledError should be raised immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
fut.cancel()
start = self.loop.time()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(0 <= elapsed < 0.1, elapsed)
# Tulip issue #195: cancelling a _WaitHandleFuture twice must not crash
fut = self.loop._proactor.wait_for_handle(event)
fut.cancel()
fut.cancel()
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -759,867,738,910,190,500 | 31.567376 | 79 | 0.61324 | false |
grlee77/scipy | scipy/optimize/_lsq/trf.py | 21 | 19479 | """Trust Region Reflective algorithm for least-squares optimization.
The algorithm is based on ideas from paper [STIR]_. The main idea is to
account for the presence of the bounds by appropriate scaling of the variables (or,
equivalently, changing a trust-region shape). Let's introduce a vector v:
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
where g is the gradient of a cost function and lb, ub are the bounds. Its
components are distances to the bounds at which the anti-gradient points (if
this distance is finite). Define a scaling matrix D = diag(v**0.5).
First-order optimality conditions can be stated as
D^2 g(x) = 0.
Meaning that components of the gradient should be zero for strictly interior
variables, and components must point inside the feasible region for variables
on the bound.
Now consider this system of equations as a new optimization problem. If the
point x is strictly interior (not on the bound), then the left-hand side is
differentiable and the Newton step for it satisfies
(D^2 H + diag(g) Jv) p = -D^2 g
where H is the Hessian matrix (or its J^T J approximation in least squares),
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
we have a Newton step satisfying
B_h p_h = -g_h,
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
to "hat" variables. To guarantee global convergence we formulate a
trust-region problem based on the Newton step in the new variables:
    0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
problem is
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
Here, the meaning of the matrix D becomes more clear: it alters the shape
of a trust-region, such that large steps towards the bounds are not allowed.
In the implementation, the trust-region problem is solved in "hat" space,
but handling of the bounds is done in the original space (see below and read
the code).
The introduction of the matrix D doesn't allow us to ignore bounds; the algorithm
must keep iterates strictly feasible (to satisfy aforementioned
differentiability), the parameter theta controls step back from the boundary
(see the code for details).
The algorithm does another important trick. If the trust-region solution
doesn't fit into the bounds, then a reflected (from a firstly encountered
bound) search direction is considered. For motivation and analysis refer to
[STIR]_ paper (and other papers of the authors). In practice, it doesn't need
much justification: the algorithm simply chooses the best step among
three: a constrained trust-region step, a reflected step and a constrained
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
space).
Another feature is that a trust-region radius control strategy is modified to
account for appearance of the diagonal C matrix (called diag_h in the code).
Note that all described peculiarities are completely gone as we consider
problems without bounds (the algorithm becomes a standard trust-region type
algorithm very similar to ones implemented in MINPACK).
The implementation supports two methods of solving the trust-region problem.
The first, called 'exact', applies SVD on Jacobian and then solves the problem
very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problems. The second, called 'lsmr', uses the 2-D subspace
approach (sometimes called "indefinite dogleg"), where the problem is solved
in a subspace spanned by the gradient and the approximate Gauss-Newton step
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
reformulated as a 4th order algebraic equation and solved very accurately by
``numpy.roots``. The subspace approach allows solving very large problems
(up to a couple of million residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse.
References
----------
.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
    and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in
    Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
import numpy as np
from numpy.linalg import norm
from scipy.linalg import svd, qr
from scipy.sparse.linalg import lsmr
from scipy.optimize import OptimizeResult
from .common import (
step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
print_iteration_nonlinear)
def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose):
# For efficiency, it makes sense to run the simplified version of the
# algorithm when no bounds are imposed. We decided to write the two
# separate functions. It violates the DRY principle, but the individual
# functions are kept the most readable.
if np.all(lb == -np.inf) and np.all(ub == np.inf):
return trf_no_bounds(
fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
else:
return trf_bounds(
fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
loss_function, tr_solver, tr_options, verbose)
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
"""Select the best step according to Trust Region Reflective algorithm."""
if in_bounds(x + p, lb, ub):
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
return p, p_h, -p_value
p_stride, hits = step_size_to_bound(x, p, lb, ub)
# Compute the reflected direction.
r_h = np.copy(p_h)
r_h[hits.astype(bool)] *= -1
r = d * r_h
# Restrict trust-region step, such that it hits the bound.
p *= p_stride
p_h *= p_stride
x_on_bound = x + p
# Reflected direction will cross first either feasible region or trust
# region boundary.
_, to_tr = intersect_trust_region(p_h, r_h, Delta)
to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
# Find lower and upper bounds on a step size along the reflected
# direction, considering the strict feasibility requirement. There is no
# single correct way to do that, the chosen approach seems to work best
# on test problems.
r_stride = min(to_bound, to_tr)
if r_stride > 0:
r_stride_l = (1 - theta) * p_stride / r_stride
if r_stride == to_bound:
r_stride_u = theta * to_bound
else:
r_stride_u = to_tr
else:
r_stride_l = 0
r_stride_u = -1
# Check if reflection step is available.
if r_stride_l <= r_stride_u:
a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
r_stride, r_value = minimize_quadratic_1d(
a, b, r_stride_l, r_stride_u, c=c)
r_h *= r_stride
r_h += p_h
r = r_h * d
else:
r_value = np.inf
# Now correct p_h to make it strictly interior.
p *= theta
p_h *= theta
p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
ag_h = -g_h
ag = d * ag_h
to_tr = Delta / norm(ag_h)
to_bound, _ = step_size_to_bound(x, ag, lb, ub)
if to_bound < to_tr:
ag_stride = theta * to_bound
else:
ag_stride = to_tr
a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
ag_h *= ag_stride
ag *= ag_stride
if p_value < r_value and p_value < ag_value:
return p, p_h, -p_value
elif r_value < p_value and r_value < ag_value:
return r, r_h, -r_value
else:
return ag, ag_h, -ag_value
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
v, dv = CL_scaling_vector(x, g, lb, ub)
v[dv != 0] *= scale_inv[dv != 0]
Delta = norm(x0 * scale_inv / v**0.5)
if Delta == 0:
Delta = 1.0
g_norm = norm(g * v, ord=np.inf)
f_augmented = np.zeros((m + n))
if tr_solver == 'exact':
J_augmented = np.empty((m + n, n))
elif tr_solver == 'lsmr':
reg_term = 0.0
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
v, dv = CL_scaling_vector(x, g, lb, ub)
g_norm = norm(g * v, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
# Now compute variables in "hat" space. Here, we also account for
# scaling introduced by `x_scale` parameter. This part is a bit tricky,
# you have to write down the formulas and see how the trust-region
# problem is formulated when the two types of scaling are applied.
# The idea is that first we apply `x_scale` and then apply Coleman-Li
# approach in the new variables.
# v is recomputed in the variables after applying `x_scale`, note that
# components which were identically 1 not affected.
v[dv != 0] *= scale_inv[dv != 0]
# Here, we apply two types of scaling.
d = v**0.5 * scale
# C = diag(g * scale) Jv
diag_h = g * dv * scale
# After all this has been done, we continue normally.
# "hat" gradient.
g_h = d * g
f_augmented[:m] = f
if tr_solver == 'exact':
J_augmented[:m] = J * d
J_h = J_augmented[:m] # Memory view.
J_augmented[m:] = np.diag(diag_h**0.5)
U, s, V = svd(J_augmented, full_matrices=False)
V = V.T
uf = U.T.dot(f_augmented)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S) # LinearOperator does dot too.
B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
g_S = S.T.dot(g_h)
# theta controls step back step ratio from the bounds.
theta = max(0.995, 1 - g_norm)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
p_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
p_h = S.dot(p_S)
p = d * p_h # Trust-region solution in the original space.
step, step_h, predicted_reduction = select_step(
x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
alpha *= Delta / Delta_new
Delta = Delta_new
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
x_scale, loss_function, tr_solver, tr_options, verbose):
x = x0.copy()
f = f0
f_true = f.copy()
nfev = 1
J = J0
njev = 1
m, n = J.shape
if loss_function is not None:
rho = loss_function(f)
cost = 0.5 * np.sum(rho[0])
J, f = scale_for_robust_loss_function(J, f, rho)
else:
cost = 0.5 * np.dot(f, f)
g = compute_grad(J, f)
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if jac_scale:
scale, scale_inv = compute_jac_scale(J)
else:
scale, scale_inv = x_scale, 1 / x_scale
Delta = norm(x0 * scale_inv)
if Delta == 0:
Delta = 1.0
if tr_solver == 'lsmr':
reg_term = 0
damp = tr_options.pop('damp', 0.0)
regularize = tr_options.pop('regularize', True)
if max_nfev is None:
max_nfev = x0.size * 100
alpha = 0.0 # "Levenberg-Marquardt" parameter
termination_status = None
iteration = 0
step_norm = None
actual_reduction = None
if verbose == 2:
print_header_nonlinear()
while True:
g_norm = norm(g, ord=np.inf)
if g_norm < gtol:
termination_status = 1
if verbose == 2:
print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
step_norm, g_norm)
if termination_status is not None or nfev == max_nfev:
break
d = scale
g_h = d * g
if tr_solver == 'exact':
J_h = J * d
U, s, V = svd(J_h, full_matrices=False)
V = V.T
uf = U.T.dot(f)
elif tr_solver == 'lsmr':
J_h = right_multiplied_operator(J, d)
if regularize:
a, b = build_quadratic_1d(J_h, g_h, -g_h)
to_tr = Delta / norm(g_h)
ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
reg_term = -ag_value / Delta**2
damp_full = (damp**2 + reg_term)**0.5
gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
S = np.vstack((g_h, gn_h)).T
S, _ = qr(S, mode='economic')
JS = J_h.dot(S)
B_S = np.dot(JS.T, JS)
g_S = S.T.dot(g_h)
actual_reduction = -1
while actual_reduction <= 0 and nfev < max_nfev:
if tr_solver == 'exact':
step_h, alpha, n_iter = solve_lsq_trust_region(
n, m, uf, s, V, Delta, initial_alpha=alpha)
elif tr_solver == 'lsmr':
p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
step_h = S.dot(p_S)
predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
step = d * step_h
x_new = x + step
f_new = fun(x_new)
nfev += 1
step_h_norm = norm(step_h)
if not np.all(np.isfinite(f_new)):
Delta = 0.25 * step_h_norm
continue
# Usual trust-region step quality estimation.
if loss_function is not None:
cost_new = loss_function(f_new, cost_only=True)
else:
cost_new = 0.5 * np.dot(f_new, f_new)
actual_reduction = cost - cost_new
Delta_new, ratio = update_tr_radius(
Delta, actual_reduction, predicted_reduction,
step_h_norm, step_h_norm > 0.95 * Delta)
step_norm = norm(step)
termination_status = check_termination(
actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
if termination_status is not None:
break
alpha *= Delta / Delta_new
Delta = Delta_new
if actual_reduction > 0:
x = x_new
f = f_new
f_true = f.copy()
cost = cost_new
J = jac(x, f)
njev += 1
if loss_function is not None:
rho = loss_function(f)
J, f = scale_for_robust_loss_function(J, f, rho)
g = compute_grad(J, f)
if jac_scale:
scale, scale_inv = compute_jac_scale(J, scale_inv)
else:
step_norm = 0
actual_reduction = 0
iteration += 1
if termination_status is None:
termination_status = 0
active_mask = np.zeros_like(x)
return OptimizeResult(
x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev,
status=termination_status)
| bsd-3-clause | -166,482,030,192,741,440 | 33.783929 | 83 | 0.581703 | false |
adlius/osf.io | api/base/exceptions.py | 2 | 12106 | from past.builtins import basestring
from rest_framework import status as http_status
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed, ErrorDetail
def get_resource_object_member(error_key, context):
from api.base.serializers import RelationshipField
field = context['view'].serializer_class._declared_fields.get(error_key, None)
if field:
return 'relationships' if isinstance(field, RelationshipField) else 'attributes'
    # If the field cannot be found (where read/write operations have different serializers,
    # or fields are serialized on __init__), assume the error was in 'attributes' by default
return 'attributes'
def dict_error_formatting(errors, context, index=None):
"""
Formats all dictionary error messages for both single and bulk requests
"""
formatted_error_list = []
# Error objects may have the following members. Title and id removed to avoid clash with "title" and "id" field errors.
top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
# Resource objects must contain at least 'id' and 'type'
resource_object_identifiers = ['type', 'id']
if index is None:
index = ''
else:
index = str(index) + '/'
for error_key, error_description in errors.items():
if isinstance(error_description, ErrorDetail):
error_description = [error_description]
if error_key in top_level_error_keys:
formatted_error_list.extend({error_key: description} for description in error_description)
elif error_key in resource_object_identifiers:
formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
elif error_key == 'non_field_errors':
            formatted_error_list.extend([{'detail': description} for description in error_description])
elif isinstance(error_description, list):
for error in error_description:
formatted_error_list += format_validators_errors(error, error_key, context, index)
else:
formatted_error_list += format_validators_errors(error_description, error_key, context, index)
return formatted_error_list
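# Illustrative input/output for dict_error_formatting (the field name is
# assumed): given errors={'title': [ErrorDetail('This field is required.')]}
# for a serializer whose 'title' is a plain attribute, the result is
# [{'source': {'pointer': '/data/attributes/title'},
#   'detail': 'This field is required.'}].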
def format_validators_errors(error_description, error_key, context, index):
errors = []
if isinstance(error_description, ErrorDetail):
errors.append({
'source': {
'pointer': f'/data/{index}{get_resource_object_member(error_key, context)}/' + error_key,
},
'detail': error_description,
})
else:
for key, value in error_description.items():
errors.append({
'source': {
'pointer': f'/data/{index}{get_resource_object_member(error_key, context)}/' + error_key,
},
'detail': value,
})
return errors
def json_api_exception_handler(exc, context):
"""
Custom exception handler that returns errors object as an array
"""
# We're deliberately not stripping html from exception detail.
# This creates potential vulnerabilities to script injection attacks
# when returning raw user input into error messages.
#
    # Fortunately, Django's templating language strips markup by default,
# but if our frontend changes we may lose that protection.
# TODO: write tests to ensure our html frontend strips html
# Import inside method to avoid errors when the OSF is loaded without Django
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
errors = []
if response:
message = response.data
if isinstance(exc, TwoFactorRequiredError):
response['X-OSF-OTP'] = 'required; app'
if isinstance(exc, JSONAPIException):
errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
elif isinstance(message, dict):
errors.extend(dict_error_formatting(message, context, index=None))
else:
if isinstance(message, basestring):
message = [message]
for index, error in enumerate(message):
if isinstance(error, dict):
errors.extend(dict_error_formatting(error, context, index=index))
else:
errors.append({'detail': error})
response.data = {'errors': errors}
return response
def format_validation_error(e):
error_list = []
for key, value in e.message_dict.items():
error_list.append('There was an issue with the {} field. {}'.format(key, value[0]))
return error_list
class EndpointNotImplementedError(APIException):
status_code = status.HTTP_501_NOT_IMPLEMENTED
default_detail = _('This endpoint is not yet implemented.')
class ServiceUnavailableError(APIException):
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
"""Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects
:param str detail: a human-readable explanation specific to this occurrence of the problem
:param dict source: A dictionary containing references to the source of the error.
See http://jsonapi.org/format/#error-objects.
Example: ``source={'pointer': '/data/attributes/title'}``
:param dict meta: A meta object containing non-standard meta info about the error.
"""
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, source=None, meta=None):
super(JSONAPIException, self).__init__(detail=detail)
self.source = source
self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
status_code = status.HTTP_410_GONE
default_detail = ('The requested resource is no longer available.')
def UserGone(user):
return Gone(
detail='The requested user is no longer available.',
meta={
'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
'middle_names': user.middle_names, 'profile_image': user.profile_image_url(),
},
)
class Conflict(JSONAPIException):
status_code = status.HTTP_409_CONFLICT
default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
def __init__(self, detail=None, parameter=None):
source = {
'parameter': parameter,
}
super(JSONAPIParameterException, self).__init__(detail=detail, source=source)
class JSONAPIAttributeException(JSONAPIException):
def __init__(self, detail=None, attribute=None):
source = {
'pointer': '/data/attributes/{}'.format(attribute),
}
super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query string parameter."""
default_detail = 'Query string contains an invalid value.'
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterOperator(JSONAPIParameterException):
"""Raised when client passes an invalid operator to a query param filter."""
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
if value and not detail:
valid_operators = ', '.join(valid_operators)
detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
value,
valid_operators,
)
super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query param filter."""
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, value=None, field_type=None):
if not detail:
detail = "Value '{0}' is not valid".format(value)
if field_type:
detail += ' for a filter on type {0}'.format(
field_type,
)
detail += '.'
super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
"""Raised when client passes an malformed filter in the query string."""
default_detail = _('Query string contains a malformed filter.')
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None):
super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')
class InvalidFilterComparisonType(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not a date or number type"""
default_detail = _('Comparison operators are only supported for dates and numbers.')
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterMatchType(JSONAPIParameterException):
"""Raised when client tries to do a match filter on a field that is not a string or a list"""
default_detail = _('Match operators are only supported for strings and lists.')
status_code = http_status.HTTP_400_BAD_REQUEST
class InvalidFilterFieldError(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not supported"""
default_detail = _('Query contained one or more filters for invalid fields.')
status_code = http_status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, parameter=None, value=None):
if value and not detail:
detail = "Value '{}' is not a filterable field.".format(value)
super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
status_code = 400
default_detail = _('Please confirm your account before using the API.')
class UnclaimedAccountError(APIException):
status_code = 400
default_detail = _('Please claim your account before using the API.')
class DeactivatedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')
class MergedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')
class InvalidAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')
class TwoFactorRequiredError(AuthenticationFailed):
default_detail = _('Must specify two-factor authentication OTP code.')
pass
class InvalidModelValueError(JSONAPIException):
status_code = 400
default_detail = _('Invalid value in POST/PUT/PATCH request.')
class TargetNotSupportedError(Exception):
"""Raised if a TargetField is used for a resource that isn't supported."""
pass
class RelationshipPostMakesNoChanges(Exception):
"""Raised when a post is on a relationship that already exists, so view can return a 204"""
pass
class NonDescendantNodeError(APIException):
"""Raised when a client attempts to associate a non-descendant node with a view only link"""
status_code = 400
default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')
def __init__(self, node_id, detail=None):
if not detail:
detail = self.default_detail.format(node_id)
super(NonDescendantNodeError, self).__init__(detail=detail)
| apache-2.0 | -4,940,389,085,349,513,000 | 38.305195 | 200 | 0.677515 | false |
fosfataza/protwis | common/sequence_signature.py | 1 | 29836 | """
A module for generating sequence signatures for the given two sets of proteins.
"""
from django.conf import settings
from django.core import exceptions
from alignment.functions import strip_html_tags, get_format_props
Alignment = getattr(__import__(
'common.alignment_' + settings.SITE_NAME,
fromlist=['Alignment']
), 'Alignment')
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, AMINO_ACID_GROUP_NAMES
from protein.models import Protein, ProteinConformation
from residue.models import Residue
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import re
import time
class SequenceSignature:
"""
A class handling the sequence signature.
"""
def __init__(self):
self.aln_pos = Alignment()
self.aln_neg = Alignment()
self.features_normalized_pos = OrderedDict()
self.features_normalized_neg = OrderedDict()
self.features_frequency_difference = OrderedDict()
self.features_frequency_diff_display = []
self.freq_cutoff = 30
self.common_gn = OrderedDict()
def setup_alignments(self, segments, protein_set_positive = None, protein_set_negative = None):
if protein_set_positive:
self.aln_pos.load_proteins(protein_set_positive)
if protein_set_negative:
self.aln_neg.load_proteins(protein_set_negative)
# In case positive and negative sets come from different classes
# unify the numbering schemes
self.common_schemes = self.merge_numbering_schemes()
self.aln_pos.numbering_schemes = self.common_schemes
self.aln_neg.numbering_schemes = self.common_schemes
# now load the segments and generic numbers
self.aln_pos.load_segments(segments)
self.aln_neg.load_segments(segments)
self.aln_pos.build_alignment()
self.aln_neg.build_alignment()
self.common_gn = deepcopy(self.aln_pos.generic_numbers)
for scheme in self.aln_neg.numbering_schemes:
for segment in self.aln_neg.segments:
for pos in self.aln_neg.generic_numbers[scheme[0]][segment].items():
if pos[0] not in self.common_gn[scheme[0]][segment].keys():
self.common_gn[scheme[0]][segment][pos[0]] = pos[1]
self.common_gn[scheme[0]][segment] = OrderedDict(sorted(
self.common_gn[scheme[0]][segment].items(),
key=lambda x: x[0].split('x')
))
self.common_segments = OrderedDict([
(x, sorted(list(set(self.aln_pos.segments[x]) | set(self.aln_neg.segments[x])), key=lambda x: x.split('x'))) for x in self.aln_neg.segments
])
# tweaking alignment
self._update_alignment(self.aln_pos)
self.aln_pos.calculate_statistics()
# tweaking consensus seq
self._update_consensus_sequence(self.aln_pos)
# tweaking negative alignment
self._update_alignment(self.aln_neg)
self.aln_neg.calculate_statistics()
# tweaking consensus seq
self._update_consensus_sequence(self.aln_neg)
def _update_alignment(self, alignment):
for prot in alignment.proteins:
for seg, resi in prot.alignment.items():
consensus = []
aln_list = [x[0] for x in resi]
aln_dict = dict([
(x[0], x) for x in resi
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus.append([pos, False, '-', 0])
else:
consensus.append(aln_dict[pos])
prot.alignment[seg] = consensus
def _update_consensus_sequence(self, alignment):
for seg, resi in alignment.consensus.items():
consensus = OrderedDict()
aln_list = [x for x in resi.keys()]
aln_dict = dict([
(x, resi[x]) for x in resi.keys()
])
for pos in self.common_segments[seg]:
if pos not in aln_list:
consensus[pos] = ['_', 0, 100]
else:
consensus[pos] = aln_dict[pos]
alignment.consensus[seg] = consensus
def _convert_feature_stats(self, fstats, aln):
tmp_fstats = []
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.common_segments:
print(fstats[segment][row])
tmp_row.append([[
str(x),
str(int(x/10)),
] for x in fstats[segment][row]])
tmp_fstats.append(tmp_row)
aln.feature_stats = tmp_fstats
def setup_alignments_from_selection(self, positive_selection, negative_selection):
"""
The function gathers necessary information from provided selections
and runs the calculations of the sequence alignments independently for
both protein sets. It also finds the common set of residue positions.
Arguments:
positive_selection {Selection} -- selection containing first group of proteins
        negative_selection {Selection} -- selection containing the second group of proteins along with the user-selected sequence segments for the alignment
"""
self.aln_pos.load_proteins_from_selection(positive_selection)
self.aln_neg.load_proteins_from_selection(negative_selection)
# local segment list
segments = []
# read selection
for segment in negative_selection.segments:
segments.append(segment)
self.setup_alignments(segments)
def calculate_signature(self):
"""
Calculates the feature frequency difference between two protein sets.
Generates the full differential matrix as well as maximum difference for a position (for scatter plot).
"""
for sid, segment in enumerate(self.aln_neg.segments):
self.features_normalized_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
self.features_normalized_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
for segment in self.aln_neg.segments:
            #TODO: get the correct default numbering scheme from settings
for idx, res in enumerate(self.common_gn[self.common_schemes[0][0]][segment].keys()):
if res not in self.aln_pos.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_pos[segment] = np.insert(self.features_normalized_pos[segment], idx, 0, axis=1)
                    # Set 100% occurrence for a gap feature
self.features_normalized_pos[segment][-1, idx] = 100
elif res not in self.aln_neg.generic_numbers[self.common_schemes[0][0]][segment].keys():
self.features_normalized_neg[segment] = np.insert(self.features_normalized_neg[segment], idx, 0, axis=1)
                    # Set 100% occurrence for a gap feature
self.features_normalized_neg[segment][-1, idx] = 100
# now the difference
self.features_frequency_difference[segment] = np.subtract(
self.features_normalized_pos[segment],
self.features_normalized_neg[segment]
)
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
# Version with display data
for row in range(len(AMINO_ACID_GROUPS.keys())):
tmp_row = []
for segment in self.aln_neg.segments:
                # first item is the real value,
                # second is the assignment of color (via css)
                # 0 - red, 5 - yellow, 10 - green
                # third item is a tooltip
tmp_row.append([[
x,
int(x/20)+5,
"{} - {}".format(
self.features_normalized_pos[segment][row][y],
self.features_normalized_neg[segment][row][y]
)
] for y, x in enumerate(self.features_frequency_difference[segment][row])])
self.features_frequency_diff_display.append(tmp_row)
self.signature = OrderedDict([(x, []) for x in self.aln_neg.segments])
for segment in self.aln_neg.segments:
tmp = np.array(self.features_frequency_difference[segment])
signature_map = np.absolute(tmp).argmax(axis=0)
self.signature[segment] = []
for col, pos in enumerate(list(signature_map)):
self.signature[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
self.features_frequency_difference[segment][pos][col],
int(self.features_frequency_difference[segment][pos][col]/20)+5
])
features_pos = OrderedDict()
features_neg = OrderedDict()
self.features_consensus_pos = OrderedDict([(x, []) for x in self.aln_neg.segments])
self.features_consensus_neg = OrderedDict([(x, []) for x in self.aln_neg.segments])
for sid, segment in enumerate(self.aln_neg.segments):
features_pos[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_pos.feature_stats],
dtype='int'
)
features_neg[segment] = np.array(
[[x[0] for x in feat[sid]] for feat in self.aln_neg.feature_stats],
dtype='int'
)
features_cons_pos = np.absolute(features_pos[segment]).argmax(axis=0)
features_cons_neg = np.absolute(features_neg[segment]).argmax(axis=0)
for col, pos in enumerate(list(features_cons_pos)):
self.features_consensus_pos[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_pos[segment][pos][col],
int(features_pos[segment][pos][col]/20)+5
])
for col, pos in enumerate(list(features_cons_neg)):
self.features_consensus_neg[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
features_neg[segment][pos][col],
int(features_neg[segment][pos][col]/20)+5
])
self._convert_feature_stats(self.features_normalized_pos, self.aln_pos)
self._convert_feature_stats(self.features_normalized_neg, self.aln_neg)
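    # Illustrative sketch with made-up numbers (not part of the original class):
    # per segment, the signature is a subtraction of two feature-frequency
    # matrices followed by a column-wise argmax of the absolute differences.
    #
    #     import numpy as np
    #     pos = np.array([[85, 10], [20, 95]])        # features x positions, positive set
    #     neg = np.array([[30, 70], [70, 30]])        # features x positions, negative set
    #     diff = np.subtract(pos, neg)                # [[ 55, -60], [-50,  65]]
    #     signature_map = np.absolute(diff).argmax(axis=0)   # -> array([0, 1])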
def prepare_display_data(self):
options = {
'num_residue_columns': len(sum([[x for x in self.common_gn[self.common_schemes[0][0]][segment]] for segment in self.aln_neg.segments], [])),
'num_of_sequences_pos': len(self.aln_pos.proteins),
'num_residue_columns_pos': len(self.aln_pos.positions),
'num_of_sequences_neg': len(self.aln_neg.proteins),
'num_residue_columns_neg': len(self.aln_neg.positions),
'common_segments': self.common_segments,
'common_generic_numbers': self.common_gn,
'feats_signature': self.features_frequency_diff_display,
'signature_consensus': self.signature,
'feats_cons_pos': self.features_consensus_pos,
'feats_cons_neg': self.features_consensus_neg,
'a_pos': self.aln_pos,
'a_neg': self.aln_neg,
}
return options
def prepare_session_data(self):
session_signature = {
'common_positions': self.common_gn,
'diff_matrix': self.features_frequency_difference,
'numbering_schemes': self.common_schemes,
'common_segments': self.common_segments,
}
return session_signature
def merge_numbering_schemes(self):
"""
Extract all of the numbering schemes used for a set of proteins.
        The schemes are read from the proteins already loaded into self.aln_pos and self.aln_neg.
"""
numbering_schemes = {}
for prot in self.aln_pos.proteins + self.aln_neg.proteins:
if prot.protein.residue_numbering_scheme.slug not in numbering_schemes:
rnsn = prot.protein.residue_numbering_scheme.name
numbering_schemes[prot.protein.residue_numbering_scheme.slug] = rnsn
# order and convert numbering scheme dict to tuple
return sorted(numbering_schemes.items(), key=lambda x: x[0])
def prepare_excel_worksheet(self, workbook, worksheet_name, aln='positive', data='alignment'):
"""
A function saving alignment data subset into the excel spreadsheet.
It adds a worksheet to an existing workbook and saves only a selected subset of alignment data.
For a complete save of the alignment it needs to be wrapped with additional code.
        The outline of the excel worksheet is similar to that of the html page.
        First column shows numbering schemes, protein list, etc.
The frequency data start from column B
Arguments:
        workbook {xlsxwriter.Workbook} -- object to add the worksheet to
        worksheet_name {string} -- name for the new worksheet
Keyword Arguments:
alignment {string} -- alignment to extract data from.
Possible choices: positive, negative, signature
        data {string} -- data type to save to worksheet: 'alignment' or 'features' frequencies
"""
props = AMINO_ACID_GROUP_NAMES.values()
worksheet = workbook.add_worksheet(worksheet_name)
if aln == 'positive':
numbering_schemes = self.aln_pos.numbering_schemes
generic_numbers_set = self.aln_pos.generic_numbers
alignment = self.aln_pos
if data == 'features':
data_block = self.aln_pos.feature_stats
elif aln == 'negative':
numbering_schemes = self.aln_neg.numbering_schemes
generic_numbers_set = self.aln_neg.generic_numbers
alignment = self.aln_neg
if data == 'features':
data_block = self.aln_neg.feature_stats
else:
numbering_schemes = self.common_schemes
generic_numbers_set = self.common_gn
if data == 'features':
data_block = self.features_frequency_diff_display
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
worksheet.write(1 + 3*row, 0, scheme[1])
# First column, stats
if data == 'features':
for offset, prop in enumerate(props):
worksheet.write(1 + 3 * len(numbering_schemes) + offset, 0, prop)
# First column, protein list (for alignment) and line for consensus sequence
else:
for offset, prot in enumerate(alignment.proteins):
worksheet.write(
1 + 3 * len(numbering_schemes) + offset,
0,
prot.protein.entry_name
)
worksheet.write(
1 + len(numbering_schemes) + len(alignment.proteins),
0,
'CONSENSUS'
)
# Second column and on
# Segments
offset = 0
for segment in generic_numbers_set[numbering_schemes[0][0]].keys():
worksheet.merge_range(
0,
1 + offset,
0,
len(generic_numbers_set[numbering_schemes[0][0]][segment]) + offset - 1,
segment
)
offset += len(generic_numbers_set[numbering_schemes[0][0]][segment])
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for sn, gn_list in generic_numbers_set[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
try:
tm, bw, gpcrdb = re.split('\.|x', strip_html_tags(gn_pair[1]))
except:
tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
col + offset,
tm
)
worksheet.write(
2 + 3 * row,
col + offset,
bw
)
worksheet.write(
3 + 3*row,
col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Stats
if data == 'features':
offset = 1 + 3 * len(numbering_schemes)
for row, prop in enumerate(data_block):
col_offset = 0
for segment in prop:
for col, freq in enumerate(segment):
cell_format = workbook.add_format(get_format_props(freq[1]))
worksheet.write(
offset + row,
1 + col + col_offset,
freq[0] if isinstance(freq[0], int) else int(freq[0]),
cell_format
)
col_offset += len(segment)
col_offset = 0
for segment, cons_feat in self.signature.items():
for col, chunk in enumerate(cons_feat):
worksheet.write(
offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[0]
)
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
worksheet.write(
1 + offset + len(AMINO_ACID_GROUPS),
1 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Alignment
else:
offset = 1 + 3 * len(alignment.numbering_schemes)
for row, data in enumerate(alignment.proteins):
col_offset = 0
for segment, sequence in data.alignment.items():
for col, res in enumerate(sequence):
cell_format = workbook.add_format(get_format_props(res=res[2]))
worksheet.write(
offset + row,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence)
# Consensus sequence
row = 1 + 3 * len(alignment.numbering_schemes) + len(alignment.proteins)
col_offset = 0
for segment, sequence in alignment.consensus.items():
for col, data in enumerate(sequence.items()):
res = data[1]
cell_format = workbook.add_format(get_format_props(res=res[0]))
worksheet.write(
row,
1 + col + col_offset,
res[0],
cell_format
)
cell_format = workbook.add_format(get_format_props(res[1]))
worksheet.write(
row + 1,
1 + col + col_offset,
res[2],
cell_format
)
col_offset += len(sequence.items())
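# Illustrative usage sketch (variable names are placeholders, not defined in this
# module; assumes two sets of proteins and a list of selected segments):
#
#     signature = SequenceSignature()
#     signature.setup_alignments(segments,
#                                protein_set_positive=set_a,
#                                protein_set_negative=set_b)
#     signature.calculate_signature()
#     display_data = signature.prepare_display_data()    # for the template
#     session_data = signature.prepare_session_data()    # input for SignatureMatch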
class SignatureMatch():
def __init__(self, common_positions, numbering_schemes, segments, difference_matrix, protein_set, cutoff=40):
self.cutoff = cutoff
self.common_gn = common_positions
self.schemes = numbering_schemes
self.segments = segments
self.diff_matrix = difference_matrix
self.signature_matrix_filtered = OrderedDict()
self.signature_consensus = OrderedDict()
self.protein_set = protein_set
self.relevant_gn = OrderedDict([(x[0], OrderedDict()) for x in self.schemes])
self.relevant_segments = OrderedDict()
self.scored_proteins = []
self.protein_report = OrderedDict()
self.protein_signatures = OrderedDict()
self.find_relevant_gns()
self.residue_to_feat = dict(
[(x, set()) for x in AMINO_ACIDS.keys()]
)
for fidx, feat in enumerate(AMINO_ACID_GROUPS.items()):
for res in feat[1].split(','):
self.residue_to_feat[res].add(fidx)
def find_relevant_gns(self):
matrix_consensus = OrderedDict()
for segment in self.segments:
print(segment)
segment_consensus = []
signature_map = np.absolute(self.diff_matrix[segment]).argmax(axis=0)
for col, pos in enumerate(list(signature_map)):
if abs(self.diff_matrix[segment][pos][col]) > self.cutoff:
segment_consensus.append(self.diff_matrix[segment][ : , col])
for scheme in self.schemes:
gnum = list(self.common_gn[scheme[0]][segment].items())[col]
try:
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
except:
self.relevant_gn[scheme[0]][segment] = OrderedDict()
self.relevant_gn[scheme[0]][segment][gnum[0]] = gnum[1]
segment_consensus = np.array(segment_consensus).T
if segment_consensus != []:
matrix_consensus[segment] = segment_consensus
self.signature_matrix_filtered = matrix_consensus
self.relevant_segments = OrderedDict([
(
x[0],
self.relevant_gn[self.schemes[0][0]][x[0]].keys()
) for x in self.signature_matrix_filtered.items()
])
signature = OrderedDict([(x[0], []) for x in matrix_consensus.items()])
for segment in self.relevant_segments:
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
tmp = np.array(self.signature_matrix_filtered[segment])
for col, pos in enumerate(list(signature_map)):
signature[segment].append([
list(AMINO_ACID_GROUPS.keys())[pos],
list(AMINO_ACID_GROUP_NAMES.values())[pos],
tmp[pos][col],
int(tmp[pos][col]/20)+5
])
self.signature_consensus = signature
def score_protein_class(self, pclass_slug='001'):
start = time.time()
protein_scores = {}
protein_signature_match = {}
class_proteins = Protein.objects.filter(
species__common_name='Human',
family__slug__startswith=pclass_slug
).exclude(
id__in=[x.id for x in self.protein_set]
)
class_a_pcf = ProteinConformation.objects.order_by('protein__family__slug',
'protein__entry_name').filter(protein__in=class_proteins, protein__sequence_type__slug='wt').exclude(protein__entry_name__endswith='-consensus')
for pcf in class_a_pcf:
p_start = time.time()
score, signature_match = self.score_protein(pcf)
protein_scores[pcf] = score
protein_signature_match[pcf] = signature_match
p_end = time.time()
print("Time elapsed for {}: ".format(pcf.protein.entry_name), p_end - p_start)
end = time.time()
self.protein_report = OrderedDict(sorted(protein_scores.items(), key=lambda x: x[1], reverse=True))
for prot in self.protein_report.items():
self.protein_signatures[prot[0]] = protein_signature_match[prot[0]]
self.scored_proteins = list(self.protein_report.keys())
print("Total time: ", end - start)
def score_protein(self, pcf):
prot_score = 0.0
consensus_match = OrderedDict([(x, []) for x in self.relevant_segments])
for segment in self.relevant_segments:
tmp = []
signature_map = np.absolute(self.signature_matrix_filtered[segment]).argmax(axis=0)
resi = Residue.objects.filter(
protein_segment__slug=segment,
protein_conformation=pcf,
generic_number__label__in=self.relevant_gn[self.schemes[0][0]][segment].keys(),
)
for idx, pos in enumerate(self.relevant_gn[self.schemes[0][0]][segment].keys()):
feat = signature_map[idx]
feat_abr = list(AMINO_ACID_GROUPS.keys())[feat]
feat_name = list(AMINO_ACID_GROUP_NAMES.values())[feat]
val = self.signature_matrix_filtered[segment][feat][idx]
try:
res = resi.get(generic_number__label=pos)
r_name = res.amino_acid if res.amino_acid != 'Gap' else '_'
if feat in self.residue_to_feat[res.amino_acid]:
prot_score += val
tmp.append([feat_abr, feat_name, val, "green", res.amino_acid, pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", res.amino_acid, pos])
else:
prot_score -= val
tmp.append([feat_abr, feat_name, val, "red", res.amino_acid, pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", res.amino_acid, pos])
except (exceptions.ObjectDoesNotExist, exceptions.MultipleObjectsReturned):
prot_score -= val
tmp.append([feat_abr, feat_name, val, "red", '_', pos]) if val > 0 else tmp.append([feat_abr, feat_name, val, "white", '_', pos])
consensus_match[segment] = tmp
return (prot_score/100, consensus_match)
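# Illustrative usage sketch (placeholder variables; the inputs typically come
# from SequenceSignature.prepare_session_data() stored in the user session):
#
#     matcher = SignatureMatch(
#         session_data['common_positions'],
#         session_data['numbering_schemes'],
#         session_data['common_segments'],
#         session_data['diff_matrix'],
#         protein_set,
#         cutoff=40,
#     )
#     matcher.score_protein_class('001')    # score human proteins of family slug 001
#     ranked = matcher.protein_report       # OrderedDict sorted by descending score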
def signature_score_excel(workbook, scores, protein_signatures, signature_filtered, relevant_gn, relevant_segments, numbering_schemes):
worksheet = workbook.add_worksheet('scored_proteins')
# First column, numbering schemes
for row, scheme in enumerate(numbering_schemes):
worksheet.write(1 + 3*row, 0, scheme[1])
# Score header
worksheet.write(1, 1, 'Score')
offset = 0
# Segments
for segment, resi in relevant_segments.items():
worksheet.merge_range(
0,
2 + offset,
0,
len(resi) + offset,
segment
)
offset += len(resi)
# Generic numbers
# for row, item in enumerate(generic_numbers_set.items()):
for row, item in enumerate(numbering_schemes):
scheme = item[0]
offset = 1
for sn, gn_list in relevant_gn[scheme].items():
for col, gn_pair in enumerate(gn_list.items()):
try:
tm, bw, gpcrdb = re.split('\.|x', strip_html_tags(gn_pair[1]))
except:
tm, bw, gpcrdb = ('', '', '')
worksheet.write(
1 + 3 * row,
1 + col + offset,
tm
)
worksheet.write(
2 + 3 * row,
1 + col + offset,
bw
)
worksheet.write(
3 + 3*row,
1 + col + offset,
gpcrdb
)
offset += len(gn_list.items())
# Line for sequence signature
worksheet.write(
1 + 3 * len(numbering_schemes),
0,
'CONSENSUS'
)
col_offset = 0
for segment, cons_feat in signature_filtered.items():
for col, chunk in enumerate(cons_feat):
worksheet.write(
2 + 3 * len(numbering_schemes),
2 + col + col_offset,
chunk[0]
)
cell_format = workbook.add_format(get_format_props(int(chunk[2]/20)+5))
worksheet.write(
1 + 3 * len(numbering_schemes),
2 + col + col_offset,
chunk[2],
cell_format
)
col_offset += len(cons_feat)
# Score lines
row_offset = 0
for protein, score in scores.items():
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
0,
protein.protein.entry_name,
)
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
1,
score,
)
col_offset = 0
for segment, data in protein_signatures[protein].items():
for col, res in enumerate(data):
cell_format = workbook.add_format({'bg_color': res[3],})
worksheet.write(
3 + 3 * len(numbering_schemes) + row_offset,
2 + col + col_offset,
res[4],
cell_format
)
col_offset += len(data)
row_offset += 1
| apache-2.0 | 4,413,995,730,283,294,700 | 41.320567 | 177 | 0.537572 | false |
anirudhSK/chromium | tools/perf/benchmarks/thread_times.py | 2 | 1721 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from benchmarks import silk_flags
from measurements import thread_times
class ThreadTimesKeySilkCases(test.Test):
"""Measures timeline metrics while performing smoothness action on key silk
cases."""
test = thread_times.ThreadTimes
page_set = 'page_sets/key_silk_cases.json'
options = {"report_silk_results": True}
class ThreadTimesFastPathKeySilkCases(test.Test):
"""Measures timeline metrics while performing smoothness action on key silk
cases using bleeding edge rendering fast paths."""
tag = 'fast_path'
test = thread_times.ThreadTimes
page_set = 'page_sets/key_silk_cases.json'
options = {"report_silk_results": True}
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForFastPath(options)
class LegacySilkBenchmark(ThreadTimesKeySilkCases):
"""Same as thread_times.key_silk_cases but with the old name."""
@classmethod
def GetName(cls):
return "silk.key_silk_cases"
class ThreadTimesFastPathMobileSites(test.Test):
"""Measures timeline metrics while performing smoothness action on
key mobile sites labeled with fast-path tag.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
test = thread_times.ThreadTimes
page_set = 'page_sets/key_mobile_sites.json'
options = {'page_label_filter' : 'fastpath'}
class LegacyFastPathBenchmark(ThreadTimesFastPathMobileSites):
"""Same as thread_times.fast_path_mobile_sites but with the old name."""
@classmethod
def GetName(cls):
return "fast_path.key_mobile_sites"
| bsd-3-clause | -7,497,054,484,465,839,000 | 35.617021 | 77 | 0.765253 | false |
cmelange/ansible | lib/ansible/modules/system/aix_inittab.py | 26 | 7988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joris Weijters <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Joris Weijters (@molekuul)"
module: aix_inittab
short_description: Manages the inittab on AIX.
description:
- Manages the inittab on AIX.
version_added: "2.3"
options:
name:
description: Name of the inittab entry.
required: True
aliases: ['service']
runlevel:
description: Runlevel of the entry.
required: True
action:
description: Action what the init has to do with this entry.
required: True
choices: [
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
]
command:
description: What command has to run.
required: True
insertafter:
    description: After which inittab line the new entry should be inserted.
state:
description: Whether the entry should be present or absent in the inittab file
choices: [ "present", "absent" ]
default: present
notes:
  - The changes are persistent across reboots; you need root rights to read or adjust the inittab with the lsitab, chitab,
    mkitab or rmitab commands.
  - Tested on AIX 7.1.
requirements: [ 'itertools']
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: "echo hello"
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and process action "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: present
become: yes
# Remove inittab entry startmyservice.
- name: remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: absent
become: yes
'''
RETURN = '''
name:
description: name of the adjusted inittab entry
returned: always
type: string
sample: startmyservice
msg:
description: action done with the inittab entry
returned: changed
type: string
sample: changed inittab entry startmyservice
changed:
description: whether the inittab changed or not
  returned: always
type: boolean
sample: true
'''
# Import necessary libraries
import itertools
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
    # Check whether the entry exists. If not, return a dict with 'exist': False;
    # if it does, return 'exist': True along with the parsed entry fields.
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
        # strip non-readable characters such as \n
values = map(lambda s: s.strip(), values)
existsdict = dict(itertools.izip(keys, values))
existsdict.update({'exist': True})
return existsdict
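# Illustrative note (example values only): 'lsitab <name>' prints one
# colon-separated inittab record, which check_current_entry() splits into its
# four fields, e.g.
#
#     startmyservice:4:once:echo hello
#
# becomes {'name': 'startmyservice', 'runlevel': '4', 'action': 'once',
#          'command': 'echo hello', 'exist': True}.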
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
runlevel=dict(required=True, type='str'),
action=dict(choices=[
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
], type='str'),
command=dict(required=True, type='str'),
insertafter=dict(type='str'),
state=dict(choices=[
'present',
'absent',
], required=True, type='str'),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
# if action is install or change,
if module.params['state'] == 'present':
# create new entry string
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
        # If the current entry does not exist, or any of its fields differ,
        # the entry will be created or changed below
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
# If the entry does not exist create the entry
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
                    module.fail_json(
                        msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
                    msg="could not remove entry from inittab", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,695,662,398,566,407,700 | 30.698413 | 122 | 0.574236 | false |
wrigri/libcloud | libcloud/test/storage/test_oss.py | 11 | 36276 | # -*- coding=utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import sys
import unittest
try:
import mock
except ImportError:
from unittest import mock
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import PY3
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import MalformedResponseError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.oss import OSSConnection
from libcloud.storage.drivers.oss import OSSStorageDriver
from libcloud.storage.drivers.oss import CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611
from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_OSS_PARAMS
class OSSConnectionTestCase(unittest.TestCase):
def setUp(self):
self.conn = OSSConnection('44CF9590006BF252F707',
'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV')
def test_signature(self):
expected = b('26NBxoKdsyly4EDv6inkoDft/yA=')
headers = {
'Content-MD5': 'ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
'Content-Type': 'text/html',
'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
'X-OSS-Meta-Author': '[email protected]',
'X-OSS-Magic': 'abracadabra',
'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
}
action = '/oss-example/nelson'
actual = OSSConnection._get_auth_signature('PUT', headers, {},
headers['Expires'],
self.conn.key,
action,
'x-oss-')
self.assertEqual(expected, actual)
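# Illustrative sketch of the kind of signing exercised above (a generic
# HMAC-SHA1-over-a-canonical-string construction; the exact canonicalization is
# what OSSConnection._get_auth_signature implements and may differ in detail):
#
#     import base64
#     import hmac
#     from hashlib import sha1
#
#     def sign(secret, string_to_sign):
#         digest = hmac.new(secret.encode('utf-8'),
#                           string_to_sign.encode('utf-8'), sha1).digest()
#         return base64.b64encode(digest)
#
# where string_to_sign joins the HTTP verb, Content-MD5, Content-Type, the
# expiry, the sorted lower-cased 'x-oss-' headers and the canonicalized
# resource (e.g. '/oss-example/nelson') with newlines.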
class ObjectTestCase(unittest.TestCase):
def test_object_with_chinese_name(self):
driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
obj = Object(name='中文', size=0, hash=None, extra=None,
meta_data=None, container=None, driver=driver)
self.assertTrue(obj.__repr__() is not None)
class OSSMockHttp(StorageMockHttp, MockHttpTestCase):
fixtures = StorageFileFixtures('oss')
base_headers = {}
def _unauthorized(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers_empty(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_empty(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_chinese(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_chinese.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_prefix(self, method, url, body, headers):
params = {'prefix': self.test.prefix}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('list_container_objects_prefix.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _get_container(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _get_object(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _notexisted_get_object(self, method, url, body, headers):
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_get_object(self, method, url, body, headers):
self.base_headers.update(
{'accept-ranges': 'bytes',
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/octet-stream',
'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
'server': 'AliyunOSS',
'x-oss-object-type': 'Normal',
'x-oss-request-id': '569A63E6257784731E3D877F',
'x-oss-meta-rabbits': 'monkeys'})
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _invalid_name(self, method, url, body, headers):
# test_create_container_bad_request
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.OK])
def _already_exists(self, method, url, body, headers):
# test_create_container_already_existed
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _create_container(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
self.assertEqual('', body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _create_container_location(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
location_constraint = ('<CreateBucketConfiguration>'
'<LocationConstraint>%s</LocationConstraint>'
'</CreateBucketConfiguration>' %
self.test.ex_location)
self.assertEqual(location_constraint, body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_doesnt_exist(self, method, url, body, headers):
# test_delete_container_doesnt_exist
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_not_empty(self, method, url, body, headers):
# test_delete_container_not_empty
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container(self, method, url, body, headers):
return (httplib.NO_CONTENT,
body,
self.base_headers,
httplib.responses[httplib.NO_CONTENT])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object(self, method, url, body, headers):
# test_delete_object
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
TEST_UPLOAD_ID = '0004B9894A22E5B1888A1E29F8236E2D'
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if not query.get('uploadId', False):
self.fail('Request doesnt contain uploadId query parameter')
upload_id = query['uploadId'][0]
if upload_id != TEST_UPLOAD_ID:
self.fail('first uploadId doesnt match')
if method == 'PUT':
# PUT is used for uploading the part. part number is mandatory
if not query.get('partNumber', False):
self.fail('Request is missing partNumber query parameter')
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
elif method == 'DELETE':
# DELETE is done for aborting the upload
body = ''
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.NO_CONTENT])
else:
commit = ET.fromstring(body)
count = 0
for part in commit.findall('Part'):
count += 1
part_no = part.find('PartNumber').text
etag = part.find('ETag').text
self.assertEqual(part_no, str(count))
self.assertEqual(etag, headers['etag'])
# Make sure that manifest contains at least one part
self.assertTrue(count >= 1)
body = self.fixtures.load('complete_multipart_upload.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _list_multipart(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'key-marker' not in query:
body = self.fixtures.load('ex_iterate_multipart_uploads_p1.xml')
else:
body = self.fixtures.load('ex_iterate_multipart_uploads_p2.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
class OSSMockRawResponse(MockRawResponse, MockHttpTestCase):
fixtures = StorageFileFixtures('oss')
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
if PY3:
parser = ET.XMLParser(encoding='utf-8')
body = ET.XML(self.body.encode('utf-8'), parser=parser)
else:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML",
body=self.body,
driver=self.connection.driver)
return body
def _foo_bar_object(self, method, url, body, headers):
# test_download_object_success
body = self._generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_invalid_size(self, method, url, body, headers):
# test_upload_object_invalid_file_size
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_upload_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_test_upload_invalid_hash1(self, method, url, body, headers):
body = ''
headers = {}
headers['etag'] = '"foobar"'
# test_upload_object_invalid_hash1
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_upload(self, method, url, body, headers):
# test_upload_object_success
body = ''
headers = {'etag': '"0CC175B9C0F1B6A831C399E269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_upload_acl(self, method, url, body, headers):
# test_upload_object_with_acl
body = ''
headers = {'etag': '"0CC175B9C0F1B6A831C399E269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data(self, method, url, body, headers):
# test_upload_object_via_stream
body = ''
headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
headers = {}
# POST is done for initiating multipart upload
if method == 'POST':
body = self.fixtures.load('initiate_multipart_upload.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
else:
body = ''
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.BAD_REQUEST])
class OSSStorageDriverTestCase(unittest.TestCase):
driver_type = OSSStorageDriver
driver_args = STORAGE_OSS_PARAMS
mock_response_klass = OSSMockHttp
mock_raw_response_klass = OSSMockRawResponse
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
self.driver_type.connectionCls.conn_classes = (
None, self.mock_response_klass)
self.driver_type.connectionCls.rawResponseCls = \
self.mock_raw_response_klass
self.mock_response_klass.type = None
self.mock_response_klass.test = self
self.mock_raw_response_klass.type = None
self.mock_raw_response_klass.test = self
self.driver = self.create_driver()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_invalid_credentials(self):
self.mock_response_klass.type = 'unauthorized'
self.assertRaises(InvalidCredsError, self.driver.list_containers)
def test_list_containers_empty(self):
self.mock_response_klass.type = 'list_containers_empty'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = 'list_containers'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
container = containers[0]
self.assertEqual('xz02tphky6fjfiuc0', container.name)
self.assertTrue('creation_date' in container.extra)
self.assertEqual('2014-05-15T11:18:32.000Z',
container.extra['creation_date'])
self.assertTrue('location' in container.extra)
self.assertEqual('oss-cn-hangzhou-a', container.extra['location'])
self.assertEqual(self.driver, container.driver)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = 'list_container_objects_empty'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = 'list_container_objects'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = objects[0]
self.assertEqual(obj.name, 'en/')
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:15.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_chinese(self):
self.mock_response_klass.type = 'list_container_objects_chinese'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = [o for o in objects
if o.name == 'WEB控制台.odp'][0]
self.assertEqual(obj.hash, '281371EA1618CF0E645D6BB90A158276')
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(
obj.extra['last_modified'], '2016-01-15T14:43:06.000Z')
self.assertTrue('owner' in obj.meta_data)
def test_list_container_objects_with_prefix(self):
self.mock_response_klass.type = 'list_container_objects_prefix'
container = Container(name='test_container', extra={},
driver=self.driver)
self.prefix = 'test_prefix'
objects = self.driver.list_container_objects(container=container,
ex_prefix=self.prefix)
self.assertEqual(len(objects), 2)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_container'
self.assertRaises(ContainerDoesNotExistError,
self.driver.get_container,
container_name='not-existed')
def test_get_container_success(self):
self.mock_response_klass.type = 'get_container'
container = self.driver.get_container(
container_name='xz02tphky6fjfiuc0')
self.assertTrue(container.name, 'xz02tphky6fjfiuc0')
def test_get_object_container_doesnt_exist(self):
self.mock_response_klass.type = 'get_object'
self.assertRaises(ObjectDoesNotExistError,
self.driver.get_object,
container_name='xz02tphky6fjfiuc0',
object_name='notexisted')
def test_get_object_success(self):
self.mock_response_klass.type = 'get_object'
obj = self.driver.get_object(container_name='xz02tphky6fjfiuc0',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'xz02tphky6fjfiuc0')
self.assertEqual(obj.size, 0)
self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
self.assertEqual(obj.extra['last_modified'],
'Fri, 15 Jan 2016 14:43:15 GMT')
self.assertEqual(obj.extra['content_type'], 'application/octet-stream')
self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
def test_create_container_bad_request(self):
# invalid container name, returns a 400 bad request
self.mock_response_klass.type = 'invalid_name'
self.assertRaises(ContainerError,
self.driver.create_container,
container_name='invalid_name')
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = 'already_exists'
self.assertRaises(InvalidContainerNameError,
self.driver.create_container,
container_name='new-container')
def test_create_container_success(self):
# success
self.mock_response_klass.type = 'create_container'
name = 'new_container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_create_container_with_ex_location(self):
self.mock_response_klass.type = 'create_container_location'
name = 'new_container'
self.ex_location = 'oss-cn-beijing'
container = self.driver.create_container(container_name=name,
ex_location=self.ex_location)
self.assertEqual(container.name, name)
self.assertTrue(container.extra['location'], self.ex_location)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_doesnt_exist'
self.assertRaises(ContainerDoesNotExistError,
self.driver.delete_container,
container=container)
def test_delete_container_not_empty(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'delete_container_not_empty'
self.assertRaises(ContainerIsNotEmptyError,
self.driver.delete_container,
container=container)
def test_delete_container_success(self):
self.mock_response_klass.type = 'delete_container'
container = Container(name='new_container', extra=None,
driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_invalid_file_size(self):
self.mock_raw_response_klass.type = 'invalid_size'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertFalse(result)
def test_download_object_not_found(self):
self.mock_raw_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
self.assertRaises(ObjectDoesNotExistError,
self.driver.download_object,
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
def test_download_object_as_stream_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_invalid_hash1(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, 'hash343hhash89h932439jsaa89', 1000
self.mock_raw_response_klass.type = 'invalid_hash1'
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
self.assertRaises(ObjectHashMismatchError,
self.driver.upload_object,
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True)
finally:
self.driver_type._upload_file = old_func
def test_upload_object_success(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, '0cc175b9c0f1b6a831c399e269772661', 1000
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertTrue('some-value' in obj.meta_data)
finally:
self.driver_type._upload_file = old_func
def test_upload_object_with_acl(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, '0cc175b9c0f1b6a831c399e269772661', 1000
old_func = self.driver_type._upload_file
try:
self.driver_type._upload_file = upload_file
self.mock_raw_response_klass.type = 'acl'
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'public-read'}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, 1000)
self.assertEqual(obj.extra['acl'], 'public-read')
finally:
self.driver_type._upload_file = old_func
def test_upload_object_with_invalid_acl(self):
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'acl': 'invalid-acl'}
self.assertRaises(AttributeError,
self.driver.upload_object,
file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True)
def test_upload_empty_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=[''])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 0)
def test_upload_small_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(data=['2', '3', '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
def test_upload_big_object_via_stream(self):
if self.driver.supports_multipart_upload:
self.mock_raw_response_klass.type = 'multipart'
self.mock_response_klass.type = 'multipart'
else:
self.mock_raw_response_klass.type = None
self.mock_response_klass.type = None
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = DummyIterator(
data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5'])
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1)
def test_upload_object_via_stream_abort(self):
if not self.driver.supports_multipart_upload:
return
self.mock_raw_response_klass.type = 'MULTIPART'
self.mock_response_klass.type = 'MULTIPART'
def _faulty_iterator():
for i in range(0, 5):
yield str(i)
raise RuntimeError('Error in fetching data')
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_stream_data'
iterator = _faulty_iterator()
extra = {'content_type': 'text/plain'}
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra)
except Exception:
pass
return
def test_ex_iterate_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
for upload in self.driver.ex_iterate_multipart_uploads(container,
max_uploads=2):
self.assertTrue(upload.key is not None)
self.assertTrue(upload.id is not None)
self.assertTrue(upload.initiated is not None)
def test_ex_abort_all_multipart_uploads(self):
if not self.driver.supports_multipart_upload:
return
self.mock_response_klass.type = 'list_multipart'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
with mock.patch('libcloud.storage.drivers.oss.OSSStorageDriver'
'._abort_multipart', autospec=True) as mock_abort:
self.driver.ex_abort_all_multipart_uploads(container)
self.assertEqual(3, mock_abort.call_count)
def test_delete_object_not_found(self):
self.mock_response_klass.type = 'not_found'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
self.assertRaises(ObjectDoesNotExistError,
self.driver.delete_object,
obj=obj)
def test_delete_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 1,451,032,568,254,711,600 | 40.164586 | 88 | 0.568191 | false |
dims/cinder | cinder/tests/unit/fake_vmem_client.py | 23 | 1779 | # Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake VMEM REST client for testing drivers.
"""
import sys
import mock
# The following gymnastics to fake an exception class globally is done because
# we want to globally model and make available certain exceptions. If we do
# not do this, then the real-driver's import will not see our fakes.
class NoMatchingObjectIdError(Exception):
pass
error = mock.Mock()
error.NoMatchingObjectIdError = NoMatchingObjectIdError
core = mock.Mock()
core.attach_mock(error, 'error')
vmemclient = mock.Mock()
vmemclient.__version__ = "unknown"
vmemclient.attach_mock(core, 'core')
sys.modules['vmemclient'] = vmemclient
mock_client_conf = [
'basic',
'basic.login',
'basic.get_node_values',
'basic.save_config',
'lun',
'lun.export_lun',
'lun.unexport_lun',
'snapshot',
'snapshot.export_lun_snapshot',
'snapshot.unexport_lun_snapshot',
'iscsi',
'iscsi.bind_ip_to_target',
'iscsi.create_iscsi_target',
'iscsi.delete_iscsi_target',
'igroup',
'client',
'client.get_client_info',
'client.create_client',
'client.delete_client',
'adapter',
'adapter.get_fc_info'
]
| apache-2.0 | -1,910,002,199,048,692,500 | 26.369231 | 78 | 0.697583 | false |
brianjimenez/lightdock | bin/simulation/lightdock_setup.py | 1 | 6309 | #!/usr/bin/env python
"""Before launching the LightDock simulation, a setup step is required.
This step parses the input PDB structures, calculates the minimum ellipsoid
containing each of them, calculates the swarms on the surface of the
receptor and populates each swarm with random coordinates for each glowworm's
optimization vector.
"""
import argparse
import numpy as np
from lightdock.util.parser import SetupCommandLineParser
from lightdock.prep.simulation import read_input_structure, save_lightdock_structure, \
calculate_starting_positions, prepare_results_environment, \
create_setup_file, calculate_anm, parse_restraints_file, \
get_restraints
from lightdock.constants import DEFAULT_LIGHTDOCK_PREFIX, DEFAULT_ELLIPSOID_DATA_EXTENSION, \
DEFAULT_NMODES_REC, DEFAULT_REC_NM_FILE, DEFAULT_NMODES_LIG, DEFAULT_LIG_NM_FILE
from lightdock.mathutil.ellipsoid import MinimumVolumeEllipsoid
from lightdock.util.logger import LoggingManager
from lightdock.error.lightdock_errors import LightDockError
log = LoggingManager.get_logger('lightdock_setup')
if __name__ == "__main__":
try:
parser = SetupCommandLineParser()
args = parser.args
# Read input structures
receptor = read_input_structure(args.receptor_pdb, args.noxt, args.noh, args.verbose_parser)
ligand = read_input_structure(args.ligand_pdb, args.noxt, args.noh, args.verbose_parser)
# Move structures to origin
rec_translation = receptor.move_to_origin()
lig_translation = ligand.move_to_origin()
# Calculate reference points for receptor
log.info("Calculating reference points for receptor %s..." % args.receptor_pdb)
rec_ellipsoid = MinimumVolumeEllipsoid(receptor.representative().coordinates)
ellipsoid_data_file = "%s%s" % (DEFAULT_LIGHTDOCK_PREFIX % receptor.structure_file_names[0],
DEFAULT_ELLIPSOID_DATA_EXTENSION)
np.save(ellipsoid_data_file, np.array([rec_ellipsoid.center.copy()]))
log.info("Done.")
# Calculate reference points for ligand
log.info("Calculating reference points for ligand %s..." % args.ligand_pdb)
lig_ellipsoid = MinimumVolumeEllipsoid(ligand.representative().coordinates)
ellipsoid_data_file = "%s%s" % (DEFAULT_LIGHTDOCK_PREFIX % ligand.structure_file_names[0],
DEFAULT_ELLIPSOID_DATA_EXTENSION)
np.save(ellipsoid_data_file, np.array([lig_ellipsoid.center.copy()]))
log.info("Done.")
# Save to file parsed structures
save_lightdock_structure(receptor)
save_lightdock_structure(ligand)
# Calculate and save ANM if required
if args.use_anm:
if args.anm_rec > 0:
log.info("Calculating ANM for receptor molecule...")
calculate_anm(receptor, args.anm_rec, DEFAULT_REC_NM_FILE)
if args.anm_lig > 0:
log.info("Calculating ANM for ligand molecule...")
calculate_anm(ligand, args.anm_lig, DEFAULT_LIG_NM_FILE)
# Parse restraints if any:
receptor_restraints = ligand_restraints = None
if args.restraints:
log.info("Reading restraints from %s" % args.restraints)
restraints = parse_restraints_file(args.restraints)
# Calculate number of restraints in order to check them
num_rec_active = len(restraints['receptor']['active'])
num_rec_passive = len(restraints['receptor']['passive'])
num_lig_active = len(restraints['ligand']['active'])
num_lig_passive = len(restraints['ligand']['passive'])
# Complain if not a single restraint has been defined, but restraints are enabled
if not num_rec_active and not num_rec_passive and not num_lig_active and not num_lig_passive:
raise LightDockError("Restraints file specified, but not a single restraint found")
# Check if restraints correspond with real residues
receptor_restraints = get_restraints(receptor, restraints['receptor'])
args.receptor_restraints = restraints['receptor']
ligand_restraints = get_restraints(ligand, restraints['ligand'])
args.ligand_restraints = restraints['ligand']
log.info("Number of receptor restraints is: %d (active), %d (passive)" % (num_rec_active, num_rec_passive))
log.info("Number of ligand restraints is: %d (active), %d (passive)" % (num_lig_active, num_lig_passive))
rec_restraints = None
try:
rec_restraints = receptor_restraints['active'] + receptor_restraints['passive']
except:
pass
lig_restraints = None
try:
lig_restraints = ligand_restraints['active'] + ligand_restraints['passive']
except:
pass
# Calculate surface points (swarm centers) over receptor structure
starting_points_files = calculate_starting_positions(receptor, ligand,
args.swarms, args.glowworms,
args.starting_points_seed,
rec_restraints, lig_restraints,
rec_translation, lig_translation,
args.ftdock_file, args.use_anm, args.anm_seed,
args.anm_rec, args.anm_lig, args.membrane)
if len(starting_points_files) != args.swarms:
args.swarms = len(starting_points_files)
log.info('Number of swarms is %d after applying restraints' % args.swarms)
# Create simulation folders
prepare_results_environment(args.swarms)
# Dump to a setup file the actual configuration
create_setup_file(args)
log.info("LightDock setup OK")
except LightDockError, error:
log.error("LightDock setup failed. Please see:")
log.error(error)
| gpl-3.0 | -8,445,596,598,521,049,000 | 47.530769 | 119 | 0.613885 | false |
m1ck/bookadoptions | django/conf/locale/pl/formats.py | 238 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' '
NUMBER_GROUPING = 3
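# Illustrative effect of the separators above (assuming localized number formatting
# is enabled): the value 1234567.89 would be rendered as '1 234 567,89'.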
| bsd-3-clause | -9,150,665,608,630,431,000 | 33.783784 | 77 | 0.551671 | false |
whereismyjetpack/ansible | lib/ansible/plugins/connection/zone.py | 45 | 7978 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# and chroot.py (c) 2013, Maykel Moya <[email protected]>
# and jail.py (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Dagobert Michelsen <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import subprocess
import traceback
from ansible import constants as C
from ansible.compat.six.moves import shlex_quote
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.module_utils._text import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local zone based connections '''
transport = 'zone'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.zone = self._play_context.remote_addr
if os.geteuid() != 0:
raise AnsibleError("zone connection requires running as root")
self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
if self.zone not in self.list_zones():
raise AnsibleError("incorrect zone name %s" % self.zone)
@staticmethod
def _search_executable(executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
raise AnsibleError("%s command not found in PATH") % executable
return cmd
def list_zones(self):
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
zones = []
for l in process.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
if s[1] != 'global':
zones.append(s[1])
return zones
def get_zone_path(self):
#solaris10vm# zoneadm -z cswbuild list -p
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout, stderr = p.communicate()
path = process.stdout.readlines()[0].split(':')[3]
return path + '/root'
def _connect(self):
''' connect to the zone; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the zone. This is only needed for implementing
        put_file() and get_file() so that we don't have to read the whole file
        into memory.
        Compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
# Note: zlogin invokes a shell (just like ssh does) so we do not pass
# this through /bin/sh -c here. Instead it goes through the shell
# that zlogin selects.
local_cmd = [self.zlogin_cmd, self.zone, cmd]
local_cmd = map(to_bytes, local_cmd)
display.vvv("EXEC %s" % (local_cmd), host=self.zone)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
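    # Illustrative use of the helper above (it mirrors put_file()/fetch_file() below);
    # 'local_path' and '/tmp/remote' are placeholder names, and a 'dd' binary is
    # assumed to exist inside the zone:
    #
    #   with open(local_path, 'rb') as f:
    #       p = self._buffered_exec_command('dd of=/tmp/remote bs=%s' % BUFSIZE, stdin=f)
    #       stdout, stderr = p.communicate()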
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the zone '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
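    # Illustrative behavior of _prefix_login_path (POSIX paths assumed):
    #   'foo/bar'   -> '/foo/bar'   (relative paths are rooted at '/')
    #   '/etc/motd' -> '/etc/motd'  (absolute paths only pass through normpath)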
def put_file(self, in_path, out_path):
''' transfer a file from local to zone '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from zone to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("zone connection requires dd command in the zone")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| gpl-3.0 | 6,919,412,866,669,932,000 | 39.704082 | 121 | 0.614314 | false |
StanfordBioinformatics/loom | server/loomengine_server/core/settings.py | 2 | 14306 | # Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
import datetime
import json
import logging
import os
import random
import socket
import sys
import tempfile
import warnings
from django.core.exceptions import ValidationError
SESSION_BACKED = 'django.contrib.sessions.backends.db'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
def to_boolean(value):
if value in [None, '', False]:
return False
if value == True:
return True
if str(value).lower() == 'false':
return False
if str(value).lower() == 'true':
return True
raise Exception("Invalid value %s. Expected True or False")
def to_float(value):
if value is None:
return None
if value == '':
return None
return float(value)
def to_int(value):
if value is None:
return None
if value == '':
return None
return int(value)
def to_list(value):
if value is None:
return []
value = value.strip(' "\'')
list_str = value.lstrip('[').rstrip(']')
if list_str == '':
return []
list = list_str.split(',')
return [item.strip(' "\'') for item in list]
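# Illustrative conversions performed by the helpers above (assumed example inputs):
#   to_boolean('False') -> False        to_boolean('true')  -> True
#   to_int('')          -> None         to_float('1.5')     -> 1.5
#   to_list('[a, "b"]') -> ['a', 'b']   to_list(None)       -> []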
SETTINGS_DIR = os.path.dirname(__file__)
BASE_DIR = (os.path.join(SETTINGS_DIR, '..'))
sys.path.append(BASE_DIR)
PORTAL_ROOT = os.path.join(BASE_DIR, '..', '..', 'portal')
# Security settings
DEBUG = to_boolean(os.getenv('LOOM_DEBUG'))
secret_file = os.path.join(os.path.dirname(__file__),'secret.txt')
if os.path.exists(secret_file):
with open(secret_file) as f:
SECRET_KEY = f.read()
else:
SECRET_KEY = os.getenv(
'LOOM_SERVER_SECRET_KEY',
''.join([random.SystemRandom()\
.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
for i in range(50)]))
with open(secret_file, 'w') as f:
f.write(SECRET_KEY)
CORS_ORIGIN_ALLOW_ALL = to_boolean(
os.getenv('LOOM_SERVER_CORS_ORIGIN_ALLOW_ALL', 'False'))
CORS_ORIGIN_WHITELIST = to_list(os.getenv('LOOM_SERVER_CORS_ORIGIN_WHITELIST', '[]'))
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
ALLOWED_HOSTS = to_list(os.getenv('LOOM_SERVER_ALLOWED_HOSTS', '[*]'))
LOGIN_REQUIRED = to_boolean(os.getenv('LOOM_LOGIN_REQUIRED', 'True'))
LOG_LEVEL = os.getenv('LOG_LEVEL', 'WARNING').upper()
STORAGE_TYPE = os.getenv('LOOM_STORAGE_TYPE', 'local').lower()
STATIC_ROOT = os.getenv('LOOM_SERVER_STATIC_ROOT', '/var/www/loom/static')
SERVER_NAME = os.getenv('LOOM_SERVER_NAME', 'loom') # used in attempt container names
SERVER_URL_FOR_WORKER = os.getenv('SERVER_URL_FOR_WORKER', 'http://127.0.0.1:8000')
SERVER_URL_FOR_CLIENT = os.getenv('SERVER_URL_FOR_CLIENT', 'http://127.0.0.1:8000')
# GCP settings
GCE_EMAIL = os.getenv('GCE_EMAIL')
GCE_PROJECT = os.getenv('GCE_PROJECT', '')
GCE_PEM_FILE_PATH = os.getenv('GCE_PEM_FILE_PATH')
GOOGLE_STORAGE_BUCKET = os.getenv('LOOM_GOOGLE_STORAGE_BUCKET', '')
SETTINGS_HOME = os.getenv('LOOM_SETTINGS_HOME', os.path.expanduser('~/.loom'))
PLAYBOOK_PATH = os.path.join(SETTINGS_HOME, os.getenv('LOOM_PLAYBOOK_DIR', 'playbooks'))
RUN_TASK_ATTEMPT_PLAYBOOK = os.getenv('LOOM_RUN_TASK_ATTEMPT_PLAYBOOK')
CLEANUP_TASK_ATTEMPT_PLAYBOOK = os.getenv('LOOM_CLEANUP_TASK_ATTEMPT_PLAYBOOK')
def _add_url_prefix(path):
if STORAGE_TYPE.lower() == 'local':
return 'file://' + path
elif STORAGE_TYPE.lower() == 'google_storage':
return 'gs://' + GOOGLE_STORAGE_BUCKET + path
else:
raise ValidationError(
'Couldn\'t recognize value for setting STORAGE_TYPE="%s"'\
% STORAGE_TYPE)
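# Illustrative results of _add_url_prefix (bucket and paths are assumed examples):
#   LOOM_STORAGE_TYPE=local          : '/home/user/loomdata' -> 'file:///home/user/loomdata'
#   LOOM_STORAGE_TYPE=google_storage : '/loomdata' -> 'gs://<LOOM_GOOGLE_STORAGE_BUCKET>/loomdata'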
STORAGE_ROOT = os.path.expanduser(os.getenv('LOOM_STORAGE_ROOT', '~/loomdata'))
INTERNAL_STORAGE_ROOT = os.path.expanduser(
os.getenv('LOOM_INTERNAL_STORAGE_ROOT', STORAGE_ROOT))
STORAGE_ROOT_WITH_PREFIX =_add_url_prefix(STORAGE_ROOT)
INTERNAL_STORAGE_ROOT_WITH_PREFIX =_add_url_prefix(INTERNAL_STORAGE_ROOT)
DISABLE_DELETE = to_boolean(os.getenv('LOOM_DISABLE_DELETE', 'False'))
FORCE_RERUN = to_boolean(os.getenv('LOOM_FORCE_RERUN', 'False'))
TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS = float(os.getenv('LOOM_TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS', '60'))
TASKRUNNER_HEARTBEAT_TIMEOUT_SECONDS = float(os.getenv('LOOM_TASKRUNNER_HEARTBEAT_TIMEOUT_SECONDS', TASKRUNNER_HEARTBEAT_INTERVAL_SECONDS*2.5))
SYSTEM_CHECK_INTERVAL_MINUTES = float(os.getenv('LOOM_SYSTEM_CHECK_INTERVAL_MINUTES', '15'))
PRESERVE_ON_FAILURE = to_boolean(os.getenv('LOOM_PRESERVE_ON_FAILURE', 'False'))
PRESERVE_ALL = to_boolean(os.getenv('LOOM_PRESERVE_ALL', 'False'))
TASK_TIMEOUT_HOURS = float(os.getenv(
'LOOM_TASK_TIMEOUT_HOURS', '24.0'))
MAXIMUM_RETRIES_FOR_ANALYSIS_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_ANALYSIS_FAILURE', '1'))
MAXIMUM_RETRIES_FOR_SYSTEM_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_SYSTEM_FAILURE', '10'))
MAXIMUM_RETRIES_FOR_TIMEOUT_FAILURE = int(os.getenv(
'LOOM_MAXIMUM_TASK_RETRIES_FOR_TIMEOUT_FAILURE', '0'))
MAXIMUM_TREE_DEPTH = int(os.getenv('LOOM_MAXIMUM_TREE_DEPTH', '10'))
DEFAULT_DOCKER_REGISTRY = os.getenv('LOOM_DEFAULT_DOCKER_REGISTRY', '')
# Database settings
# Any defaults must match defaults in playbook
MYSQL_HOST = os.getenv('LOOM_MYSQL_HOST')
MYSQL_USER = os.getenv('LOOM_MYSQL_USER', 'loom')
MYSQL_PASSWORD = os.getenv('LOOM_MYSQL_PASSWORD', 'loompass')
MYSQL_DATABASE = os.getenv('LOOM_MYSQL_DATABASE', 'loomdb')
MYSQL_PORT = int(os.getenv('LOOM_MYSQL_PORT', 3306))
MYSQL_SSL_CA_CERT_PATH = os.getenv('LOOM_MYSQL_SSL_CA_CERT_PATH')
MYSQL_SSL_CLIENT_CERT_PATH = os.getenv('LOOM_MYSQL_SSL_CLIENT_CERT_PATH')
MYSQL_SSL_CLIENT_KEY_PATH = os.getenv('LOOM_MYSQL_SSL_CLIENT_KEY_PATH')
# Email settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('LOOM_EMAIL_HOST', None)
EMAIL_PORT = to_int(os.getenv('LOOM_EMAIL_PORT', 587))
EMAIL_HOST_USER = os.getenv('LOOM_EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = os.getenv('LOOM_EMAIL_HOST_PASSWORD', None)
EMAIL_USE_TLS = to_boolean(os.getenv('LOOM_EMAIL_USE_TLS', True))
EMAIL_USE_SSL = to_boolean(os.getenv('LOOM_EMAIL_USE_SSL', True))
EMAIL_TIMEOUT = to_float(os.getenv('LOOM_EMAIL_TIMEOUT', 0.0))
EMAIL_SSL_KEYFILE = os.getenv('LOOM_EMAIL_SSL_KEYFILE', None)
EMAIL_SSL_CERTFILE = os.getenv('LOOM_EMAIL_SSL_CERTFILE', None)
DEFAULT_FROM_EMAIL = os.getenv('LOOM_DEFAULT_FROM_EMAIL', EMAIL_HOST_USER)
NOTIFICATION_ADDRESSES = to_list(os.getenv('LOOM_NOTIFICATION_ADDRESSES', '[]'))
NOTIFICATION_HTTPS_VERIFY_CERTIFICATE = to_boolean(os.getenv('LOOM_NOTIFICATION_HTTPS_VERIFY_CERTIFICATE', True))
# Message broker settings
LOOM_RABBITMQ_PASSWORD = os.getenv('LOOM_RABBITMQ_PASSWORD', 'guest')
LOOM_RABBITMQ_USER = os.getenv('LOOM_RABBITMQ_USER', 'guest')
LOOM_RABBITMQ_VHOST = os.getenv('LOOM_RABBITMQ_VHOST', '/')
LOOM_RABBITMQ_HOST = os.getenv('LOOM_RABBITMQ_HOST', 'rabbitmq')
LOOM_RABBITMQ_PORT = os.getenv('LOOM_RABBITMQ_PORT', '5672')
def _get_ansible_inventory():
ansible_inventory = os.getenv('LOOM_ANSIBLE_INVENTORY', 'localhost,')
if ',' not in ansible_inventory:
ansible_inventory = os.path.join(
PLAYBOOK_PATH,
os.getenv('LOOM_ANSIBLE_INVENTORY'))
return ansible_inventory
ANSIBLE_INVENTORY = _get_ansible_inventory()
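# Illustrative resolution by _get_ansible_inventory above ('inventory.ini' is an assumed name):
#   LOOM_ANSIBLE_INVENTORY='localhost,'    -> used verbatim as an inline host list
#   LOOM_ANSIBLE_INVENTORY='inventory.ini' -> '<PLAYBOOK_PATH>/inventory.ini' (no comma, so treated as a file)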
LOOM_SSH_PRIVATE_KEY_PATH = os.getenv('LOOM_SSH_PRIVATE_KEY_PATH')
# For testing only
TEST_DISABLE_ASYNC_DELAY = to_boolean(os.getenv('TEST_DISABLE_ASYNC_DELAY', False))
TEST_NO_CREATE_TASK = to_boolean(os.getenv('TEST_NO_CREATE_TASK', False))
TEST_NO_RUN_TASK_ATTEMPT = to_boolean(os.getenv('TEST_NO_RUN_TASK_ATTEMPT', False))
TEST_NO_TASK_ATTEMPT_CLEANUP = to_boolean(os.getenv(
'TEST_NO_TASK_ATTEMPT_CLEANUP', False))
TEST_NO_PUSH_INPUTS= to_boolean(os.getenv('TEST_NO_PUSH_INPUTS', False))
# Fixed settings
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_TZ = True
CELERY_ALWAYS_EAGER = True
APPEND_SLASH = True
ROOT_URLCONF = 'loomengine_server.core.urls'
# Celery
CELERY_RESULT_BACKEND = 'django-cache'
CELERY_BROKER_URL = 'amqp://%s:%s@%s:%s/%s' \
% (LOOM_RABBITMQ_USER, LOOM_RABBITMQ_PASSWORD,
LOOM_RABBITMQ_HOST, LOOM_RABBITMQ_PORT,
LOOM_RABBITMQ_VHOST)
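# With the defaults above this resolves to 'amqp://guest:guest@rabbitmq:5672//'
# (the trailing '//' comes from the default vhost '/').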
CELERY_BROKER_POOL_LIMIT = 50
CELERYD_TASK_SOFT_TIME_LIMIT = 60
LOGIN_REDIRECT_URL = '/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django_extensions',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'django_celery_results',
'api',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if LOGIN_REQUIRED:
drf_permission_classes = ('rest_framework.permissions.IsAuthenticated',)
else:
drf_permission_classes = ('rest_framework.permissions.AllowAny',)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': drf_permission_classes,
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'loomengine_server.core.wsgi.application'
def _get_sqlite_databases():
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'loomdb.sqlite3'),
}
}
def _get_mysql_databases():
if not MYSQL_USER:
raise Exception(
"LOOM_MYSQL_USER is a required setting if LOOM_MYSQL_HOST is set")
if not MYSQL_DATABASE:
raise Exception(
"LOOM_MYSQL_DATABASE is a required setting if LOOM_MYSQL_HOST is set")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': MYSQL_HOST,
'NAME': MYSQL_DATABASE,
'USER': MYSQL_USER,
'PORT': MYSQL_PORT,
}
}
if MYSQL_PASSWORD:
DATABASES['default'].update({
'PASSWORD': MYSQL_PASSWORD
})
if MYSQL_SSL_CA_CERT_PATH \
or MYSQL_SSL_CLIENT_CERT_PATH \
or MYSQL_SSL_CLIENT_KEY_PATH:
if not (MYSQL_SSL_CA_CERT_PATH \
and MYSQL_SSL_CLIENT_CERT_PATH \
and MYSQL_SSL_CLIENT_KEY_PATH):
raise Exception(
'One or more required values missing: '\
'LOOM_MYSQL_SSL_CA_CERT_PATH="%s", '\
'LOOM_MYSQL_SSL_CLIENT_CERT_PATH="%s", '\
'LOOM_MYSQL_SSL_CLIENT_KEY_PATH="%s"' % (
MYSQL_SSL_CA_CERT_PATH,
MYSQL_SSL_CLIENT_CERT_PATH,
MYSQL_SSL_CLIENT_KEY_PATH))
else:
DATABASES['default'].update({
'OPTIONS': {
'ssl': {
'ca': MYSQL_SSL_CA_CERT_PATH,
'cert': MYSQL_SSL_CLIENT_CERT_PATH,
'key': MYSQL_SSL_CLIENT_KEY_PATH
}
}
})
return DATABASES
# Database
if MYSQL_HOST:
DATABASES = _get_mysql_databases()
else:
DATABASES = _get_sqlite_databases()
# Logging
if len(sys.argv) > 1 and sys.argv[1] == 'test':
DISABLE_LOGGING = True
else:
DISABLE_LOGGING = False
if DISABLE_LOGGING:
LOGGING = {}
else:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(levelname)s [%(asctime)s] %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
}
},
'loggers': {
'django': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
'loomengine': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
'api': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
},
}
STATIC_URL = '/%s/' % os.path.basename(STATIC_ROOT)
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# This is needed for nginx reverse proxy to work
INTERNAL_IPS = ["127.0.0.1",]
if DEBUG or (len(sys.argv) > 1 and sys.argv[1] == 'collectstatic'):
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE_CLASSES.append('debug_toolbar.middleware.DebugToolbarMiddleware')
def custom_show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
'MEDIA_URL': '/__debug__/m/',
'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': '232871b2',
'TIMEOUT': 0,
}
}
| agpl-3.0 | 4,712,104,880,341,630,000 | 34.149877 | 143 | 0.642038 | false |
senuido/stash-scanner | lib/ModsHelper.py | 1 | 3577 | import json
import re
from lib.ModFilter import ModFilterType
from lib.ModFilterGroup import PSEUDO_MODS
from lib.Utility import AppException
class ModsHelper:
MODS_FNAME = 'res\\mods.json'
MOD_TEXT_REGEX = re.compile('\(([^()]+)\)\s+(.*)')
def __init__(self):
self.mod_list = None
self.mod_set = None
def init(self):
try:
self.load()
except Exception as e:
raise AppException('Failed to load item mods information.\n{}'.format(e))
def load(self):
mod_set = set()
mod_list = []
cat_ordered = ['[pseudo] mods', '[total] mods', 'explicit', 'crafted', 'implicit', 'enchantments',
'unique explicit', 'map mods', 'prophecies', 'leaguestone']
cat_ignore = []
with open(self.MODS_FNAME) as f:
data = json.load(f)
for cat in cat_ordered:
if cat in cat_ignore:
continue
cat_mods = []
for mod in data['mods'][cat]:
mod_type, text = self.textToMod(mod)
if mod_type == ModFilterType.Pseudo and text not in PSEUDO_MODS:
# convert mod to a non-psuedo if it has another tag
inner_tag, inner_text = self._getTagText(text)
if inner_tag is None:
continue
mod = text
cat_mods.append(mod)
for mod in sorted(cat_mods):
mod_set.add(mod)
if len(mod_set) > len(mod_list):
mod_list.append(mod)
self.mod_list = mod_list
self.mod_set = mod_set
def modToText(self, mod_type, expr):
if mod_type == ModFilterType.Pseudo:
pat = expr
else:
pat = expr.replace('([0-9]+)', '#')
pat = pat.replace('\+', '+') # un-escape characters
if pat.endswith('$'):
pat = pat[:-1]
if mod_type == ModFilterType.Explicit:
return pat
return '({}) {}'.format(mod_type.value, pat)
def isCustom(self, mod_type, expr):
return self.modToText(mod_type, expr) not in self.mod_set
def isPredefined(self, mod_text):
return mod_text in self.mod_set
def textToMod(self, mod_text):
tag, text = self._getTagText(mod_text)
if tag is None:
mod_type = ModFilterType.Explicit
else:
mod_type = ModFilterType(tag)
expr = text
if expr and mod_type != ModFilterType.Pseudo:
expr = expr.replace('+', '\+') # escape characters
expr = expr.replace('#', '([0-9]+)') + '$'
return mod_type, expr
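    # Illustrative round trip between modToText/textToMod above, using an assumed explicit mod:
    #   textToMod('#% increased Spell Damage')
    #       -> (ModFilterType.Explicit, '([0-9]+)% increased Spell Damage$')
    #   modToText(ModFilterType.Explicit, '([0-9]+)% increased Spell Damage$')
    #       -> '#% increased Spell Damage'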
def _getTagText(self, text):
match = self.MOD_TEXT_REGEX.match(text)
if match:
return match.groups()
return None, text
def stripTags(self, mod_text):
while True:
tag, mod_text = self._getTagText(mod_text)
if tag is None:
return mod_text
def modToParam(self, mod_type, expr):
text = self.modToText(mod_type, expr)
# prevents custom mod conversion
# while searching for sortable mods works, for most cases, custom mods will break the search
if not self.isPredefined(text):
raise ValueError('Cannot convert custom mod {} to param.'.format(text))
if mod_type == ModFilterType.Total:
text = '({}) {}'.format(ModFilterType.Pseudo.name.lower(), text)
return text
mod_helper = ModsHelper() | gpl-3.0 | 3,681,077,305,719,443,500 | 29.322034 | 106 | 0.536763 | false |
jashworth-isb/cmonkey-python | test/util_test.py | 1 | 11520 | """util_test.py - test classes for util module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import unittest
import util
import operator
import numpy as np
class DelimitedFileTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for DelimitedFile"""
def test_read_with_tabs(self):
"""Reads a tab delimited file"""
dfile = util.DelimitedFile.read("testdata/simple.tsv")
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
def test_read_with_tabs_and_header(self):
"""Reads a tab delimited file with a header"""
dfile = util.DelimitedFile.read("testdata/simple.tsv", has_header=True)
lines = dfile.lines()
self.assertEquals(1, len(lines))
self.assertEquals(["value11", "value12"], dfile.header())
def test_read_with_semicolon_header_and_comments(self):
"""Reads a semicolon delimited file with a header and comments"""
dfile = util.DelimitedFile.read("testdata/withcomments.ssv", sep=';',
has_header=True, comment='#')
lines = dfile.lines()
self.assertEquals(2, len(lines))
self.assertEquals(["header1", "header2"], dfile.header())
def test_read_with_quotes(self):
"""Reads a semicolon delimited file with quotes"""
dfile = util.DelimitedFile.read("testdata/withquotes.ssv", sep=';',
has_header=False, comment='#', quote='"')
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
def test_read_with_empty_lines(self):
"""Reads a semicolon delimited file containing emptylines"""
dfile = util.DelimitedFile.read("testdata/withemptylines.ssv", sep=';',
has_header=True, comment='#', quote='"')
lines = dfile.lines()
self.assertEquals(["header1", "header2"], dfile.header())
self.assertEquals(2, len(lines))
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
def test_create_from_text(self):
"""Reads a tab delimited file from a text"""
dfile = util.DelimitedFile.create_from_text(
"value11\tvalue12\nvalue21\tvalue22")
lines = dfile.lines()
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
def test_create_from_text_empty_line_at_end(self):
"""Reads a tab delimited file from a text"""
dfile = util.DelimitedFile.create_from_text(
"value11\tvalue12\nvalue21\tvalue22\n")
lines = dfile.lines()
self.assertEquals(2, len(lines))
self.assertEquals(["value11", "value12"], lines[0])
self.assertEquals(["value21", "value22"], lines[1])
self.assertIsNone(dfile.header())
class LevenshteinDistanceTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for levenshtein_distance"""
def test_kitten_sitting(self):
"""compare kitten with sitting"""
self.assertEquals(3, util.levenshtein_distance('sitting', 'kitten'))
def test_saturday_sunday(self):
"""compare Saturday with Sunday"""
self.assertEquals(3, util.levenshtein_distance('Sunday', 'Saturday'))
RSAT_LIST_FILE_PATH = "testdata/RSAT_genomes_listing.txt"
class BestMatchingLinksTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for best_matching_links"""
def test_best_rsat_matches(self):
"""test the best_matching_links function"""
with open(RSAT_LIST_FILE_PATH) as inputfile:
html = inputfile.read()
matches = util.best_matching_links('Halobacterium', html)
self.assertEquals("Halobacterium_sp/", matches[0])
class UtilsTest(unittest.TestCase): # pylint: disable-msg=R0904
"""Test class for utility functions"""
def test_quantile(self):
"""tests the quantile function"""
data = [1, 2, 3, 4, 5]
self.assertEquals(1, util.quantile(data, 0))
self.assertEquals(1.8, util.quantile(data, 0.2))
self.assertEquals(2, util.quantile(data, 0.25))
self.assertEquals(3, util.quantile(data, 0.5))
self.assertEquals(4, util.quantile(data, 0.75))
self.assertEquals(5, util.quantile(data, 1))
def test_r_stddev(self):
"""tests the standard deviation function"""
self.assertEquals(0.1, util.r_stddev([0.1, 0.2, 0.3]))
def test_r_stddev_with_nan(self):
"""tests the standard deviation function"""
self.assertEquals(0.1, util.r_stddev([0.1, 0.2, 0.3, np.nan]))
def test_r_variance_columns(self):
"""tests the column variance function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.r_variance_columns(matrix)
self.assertAlmostEqual(0.1157139233, result[0])
self.assertAlmostEqual(0.1482354433, result[1])
self.assertAlmostEqual(0.8356519353, result[2])
self.assertAlmostEqual(0.0007737516, result[3])
def test_r_variance_columns_with_nans(self):
"""tests the column variance function"""
matrix = [[np.nan, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, np.nan]]
result = util.r_variance_columns(matrix)
self.assertAlmostEqual(0.1661836837, result[0])
self.assertAlmostEqual(0.1482354433, result[1])
self.assertAlmostEqual(0.8356519353, result[2])
self.assertAlmostEqual(0.0011550937, result[3])
def test_column_means(self):
"""tests the column_means() function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.column_means(matrix)
self.assertAlmostEqual(-0.08003333, result[0])
self.assertAlmostEqual(0.15483333, result[1])
self.assertAlmostEqual(0.00171, result[2])
self.assertAlmostEqual(0.00534107, result[3])
def test_column_means_with_nans(self):
"""tests the column_means() function, containing NaNs"""
matrix = [[0.0010, 0.1234, 0.21370, np.nan],
[0.2123, np.nan, -0.99980, -0.0213],
[np.nan, 0.5546, 0.79123, 0.00312321]]
result = util.column_means(matrix)
self.assertAlmostEqual(0.10664999, result[0])
self.assertAlmostEqual(0.33899999, result[1])
self.assertAlmostEqual(0.00171, result[2])
self.assertAlmostEqual(-0.00908839499, result[3])
def test_row_means(self):
"""tests the row_means() function"""
matrix = [[0.0010, 0.1234, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, 0.00312321]]
result = util.row_means(matrix)
self.assertAlmostEqual(0.0930750, result[0])
self.assertAlmostEqual(-0.255575, result[1])
self.assertAlmostEqual(0.2238883025, result[2])
def test_row_means_with_nans(self):
"""tests the row_means() function"""
matrix = [[0.0010, np.nan, 0.21370, 0.0342],
[0.2123, -0.2135, -0.99980, -0.0213],
[-0.4534, 0.5546, 0.79123, np.nan]]
result = util.row_means(matrix)
self.assertAlmostEqual(0.08296666, result[0])
self.assertAlmostEqual(-0.255575, result[1])
self.assertAlmostEqual(0.297476666, result[2])
def test_trim_mean_nonmedian(self):
self.assertAlmostEqual(
40.625,
util.trim_mean([2, 4, 6, 7, 11, 21, 81, 90, 105, 121], 0.1))
def test_trim_mean_median(self):
self.assertAlmostEqual(3.5, util.trim_mean([.1, .2, 3, 4, 5, 6], 0.5))
def test_trim_mean_no_values(self):
self.assertEqual(0, util.trim_mean([], 0.05))
def test_trim_mean_real(self):
values = [0.0, 0.0, -8.7520618359684352, -8.7520618359684352, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.assertAlmostEqual(-1.4586770, util.trim_mean(values, 0.05))
def test_mean_with_nans(self):
"""tests the mean() function"""
array = np.array([2.0, 3.0, np.nan, 1.0])
result = util.mean(array)
self.assertAlmostEqual(2.0, result)
def test_density(self):
kvalues = [3.4268700450682301, 3.3655160468930152, -8.0654569044842539,
2.0762815314005487, 4.8537715329554203, 1.2374476248622075]
cluster_values = [-3.5923001345962162, 0.77069901513184735,
-4.942909785931378, -3.1580950032999096]
bandwidth = 2.69474878768
dmin = -13.8848342423
dmax = 12.6744452247
result = util.density(kvalues, cluster_values, bandwidth, dmin, dmax)
self.assertAlmostEquals(0.08663036966690765, result[0])
self.assertAlmostEquals(0.08809242907902183, result[1])
self.assertAlmostEquals(0.49712338305039777, result[2])
self.assertAlmostEquals(0.12248549621579163, result[3])
self.assertAlmostEquals(0.05708884005243133, result[4])
self.assertAlmostEquals(0.14857948193544993, result[5])
def test_sd_rnorm(self):
result = util.sd_rnorm([1.3, 1.6, 1.2, 1.05], 9, 0.748951)
# the results are fairly random, make sure we have the right
# number of values
self.assertEquals(9, len(result))
def test_max_row_var(self):
"""tests maximum row variance function"""
matrix = [[1, 5, 9, 13],
[2, 6, 10, 14],
[3, 7, 11, 15],
[4, 8, 12, 16]]
result = util.max_row_var(matrix)
self.assertAlmostEqual(26.666666666666664, result)
def test_max_row_var_with_nans(self):
"""tests maximum row variance with NaNs"""
matrix = [[1, np.nan, 9],
[np.nan, 6, 10],
[3, 7, np.nan],
[4, 8, 12]]
result = util.max_row_var(matrix)
self.assertAlmostEqual(16.0, result)
def test_r_outer(self):
"""tests the r_outer function"""
result = util.r_outer([5.5, 6.5], [4.5, 7.5], operator.add)
self.assertAlmostEqual(10.0, result[0][0])
self.assertAlmostEqual(13.0, result[0][1])
self.assertAlmostEqual(11.0, result[1][0])
self.assertAlmostEqual(14.0, result[1][1])
class Order2StringTest(unittest.TestCase):  # pylint: disable-msg=R0904
"""Test class for order2string"""
def test_order2string(self):
self.assertEquals("1st", util.order2string(1))
self.assertEquals("2nd", util.order2string(2))
self.assertEquals("3rd", util.order2string(3))
self.assertEquals("4th", util.order2string(4))
self.assertEquals("11th", util.order2string(11))
self.assertEquals("12th", util.order2string(12))
self.assertEquals("21st", util.order2string(21))
self.assertEquals("22nd", util.order2string(22))
self.assertEquals("23rd", util.order2string(23))
| lgpl-3.0 | -2,578,243,708,874,551,000 | 41.666667 | 81 | 0.608507 | false |
Srisai85/scipy | scipy/stats/tests/test_contingency.py | 126 | 5959 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_approx_equal, assert_raises,
assert_allclose)
from scipy.special import xlogy
from scipy.stats.contingency import margins, expected_freq, chi2_contingency
def test_margins():
a = np.array([1])
m = margins(a)
assert_equal(len(m), 1)
m0 = m[0]
assert_array_equal(m0, np.array([1]))
a = np.array([[1]])
m0, m1 = margins(a)
expected0 = np.array([[1]])
expected1 = np.array([[1]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(12).reshape(2, 6)
m0, m1 = margins(a)
expected0 = np.array([[15], [51]])
expected1 = np.array([[6, 8, 10, 12, 14, 16]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(24).reshape(2, 3, 4)
m0, m1, m2 = margins(a)
expected0 = np.array([[[66]], [[210]]])
expected1 = np.array([[[60], [92], [124]]])
expected2 = np.array([[[60, 66, 72, 78]]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
assert_array_equal(m2, expected2)
def test_expected_freq():
assert_array_equal(expected_freq([1]), np.array([1.0]))
observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
e = expected_freq(observed)
assert_array_equal(e, np.ones_like(observed))
observed = np.array([[10, 10, 20], [20, 20, 20]])
e = expected_freq(observed)
correct = np.array([[12., 12., 16.], [18., 18., 24.]])
assert_array_almost_equal(e, correct)
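# Worked check for the case above, using the standard expected-frequency formula
# expected[i, j] = row_total[i] * col_total[j] / grand_total:
# row totals (40, 60), column totals (30, 30, 40), grand total 100,
# so expected[0, 0] = 40 * 30 / 100 = 12.0, matching `correct`.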
def test_chi2_contingency_trivial():
# Some very simple tests for chi2_contingency.
# A trivial case
obs = np.array([[1, 2], [1, 2]])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 1)
assert_array_equal(obs, expected)
# A *really* trivial case: 1-D data.
obs = np.array([1, 2, 3])
chi2, p, dof, expected = chi2_contingency(obs, correction=False)
assert_equal(chi2, 0.0)
assert_equal(p, 1.0)
assert_equal(dof, 0)
assert_array_equal(obs, expected)
def test_chi2_contingency_R():
# Some test cases that were computed independently, using R.
Rcode = \
"""
# Data vector.
data <- c(
12, 34, 23, 4, 47, 11,
35, 31, 11, 34, 10, 18,
12, 32, 9, 18, 13, 19,
12, 12, 14, 9, 33, 25
)
# Create factor tags:r=rows, c=columns, t=tiers
r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3")))
t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2")))
# 3-way Chi squared test of independence
s = summary(xtabs(data~r+c+t))
print(s)
"""
Routput = \
"""
Call: xtabs(formula = data ~ r + c + t)
Number of cases in table: 478
Number of factors: 3
Test for independence of all factors:
Chisq = 102.17, df = 17, p-value = 3.514e-14
"""
obs = np.array(
[[[12, 34, 23],
[35, 31, 11],
[12, 32, 9],
[12, 12, 14]],
[[4, 47, 11],
[34, 10, 18],
[18, 13, 19],
[9, 33, 25]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 102.17, significant=5)
assert_approx_equal(p, 3.514e-14, significant=4)
assert_equal(dof, 17)
Rcode = \
"""
# Data vector.
data <- c(
#
12, 17,
11, 16,
#
11, 12,
15, 16,
#
23, 15,
30, 22,
#
14, 17,
15, 16
)
# Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2")))
c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2")))
d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2")))
t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2")))
# 4-way Chi squared test of independence
s = summary(xtabs(data~r+c+d+t))
print(s)
"""
Routput = \
"""
Call: xtabs(formula = data ~ r + c + d + t)
Number of cases in table: 262
Number of factors: 4
Test for independence of all factors:
Chisq = 8.758, df = 11, p-value = 0.6442
"""
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
chi2, p, dof, expected = chi2_contingency(obs)
assert_approx_equal(chi2, 8.758, significant=4)
assert_approx_equal(p, 0.6442, significant=4)
assert_equal(dof, 11)
def test_chi2_contingency_g():
c = np.array([[15, 60], [15, 90]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False)
assert_allclose(g, 2*xlogy(c, c/e).sum())
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True)
c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
c = np.array([[10, 12, 10], [12, 10, 10]])
g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
assert_allclose(g, 2*xlogy(c, c/e).sum())
def test_chi2_contingency_bad_args():
# Test that "bad" inputs raise a ValueError.
# Negative value in the array of observed frequencies.
obs = np.array([[-1, 10], [1, 2]])
assert_raises(ValueError, chi2_contingency, obs)
# The zeros in this will result in zeros in the array
# of expected frequencies.
obs = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, chi2_contingency, obs)
# A degenerate case: `observed` has size 0.
obs = np.empty((0, 8))
assert_raises(ValueError, chi2_contingency, obs)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 1,608,764,446,934,120,400 | 28.5 | 82 | 0.549589 | false |
Lkhagvadelger/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py | 122 | 10361 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
class UnableToApplyPatch(Exception):
def __init__(self, patch):
Exception.__init__(self)
self.patch = patch
class PatchAnalysisTaskDelegate(object):
def parent_command(self):
raise NotImplementedError("subclasses must implement")
def run_command(self, command):
raise NotImplementedError("subclasses must implement")
def command_passed(self, message, patch):
raise NotImplementedError("subclasses must implement")
def command_failed(self, message, script_error, patch):
raise NotImplementedError("subclasses must implement")
def refetch_patch(self, patch):
raise NotImplementedError("subclasses must implement")
def expected_failures(self):
raise NotImplementedError("subclasses must implement")
def test_results(self):
raise NotImplementedError("subclasses must implement")
def archive_last_test_results(self, patch):
raise NotImplementedError("subclasses must implement")
def build_style(self):
raise NotImplementedError("subclasses must implement")
# We could make results_archive optional, but for now it's required.
def report_flaky_tests(self, patch, flaky_tests, results_archive):
raise NotImplementedError("subclasses must implement")
class PatchAnalysisTask(object):
def __init__(self, delegate, patch):
self._delegate = delegate
self._patch = patch
self._script_error = None
self._results_archive_from_patch_test_run = None
self._results_from_patch_test_run = None
self._expected_failures = delegate.expected_failures()
def _run_command(self, command, success_message, failure_message):
try:
self._delegate.run_command(command)
self._delegate.command_passed(success_message, patch=self._patch)
return True
except ScriptError, e:
self._script_error = e
self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
return False
def _clean(self):
return self._run_command([
"clean",
],
"Cleaned working directory",
"Unable to clean working directory")
def _update(self):
# FIXME: Ideally the status server log message should include which revision we updated to.
return self._run_command([
"update",
],
"Updated working directory",
"Unable to update working directory")
def _apply(self):
return self._run_command([
"apply-attachment",
"--no-update",
"--non-interactive",
self._patch.id(),
],
"Applied patch",
"Patch does not apply")
def _build(self):
return self._run_command([
"build",
"--no-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Built patch",
"Patch does not build")
def _build_without_patch(self):
return self._run_command([
"build",
"--force-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Able to build without patch",
"Unable to build without patch")
def _test(self):
return self._run_command([
"build-and-test",
"--no-clean",
"--no-update",
# Notice that we don't pass --build, which means we won't build!
"--test",
"--non-interactive",
],
"Passed tests",
"Patch does not pass tests")
def _build_and_test_without_patch(self):
return self._run_command([
"build-and-test",
"--force-clean",
"--no-update",
"--build",
"--test",
"--non-interactive",
],
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
def _land(self):
# Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
return self._run_command([
"land-attachment",
"--force-clean",
"--non-interactive",
"--parent-command=" + self._delegate.parent_command(),
self._patch.id(),
],
"Landed patch",
"Unable to land patch")
def _report_flaky_tests(self, flaky_test_results, results_archive):
self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
def _results_failed_different_tests(self, first, second):
first_failing_tests = [] if not first else first.failing_tests()
second_failing_tests = [] if not second else second.failing_tests()
return first_failing_tests != second_failing_tests
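    # Runs the tests up to twice with the patch applied and, if needed, once on a
    # clean tree, to separate patch-caused failures from flaky or pre-existing ones.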
def _test_patch(self):
if self._test():
return True
        # Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
# We could remove this dependency by building the test_results from the archive.
first_results = self._delegate.test_results()
first_results_archive = self._delegate.archive_last_test_results(self._patch)
first_script_error = self._script_error
first_failure_status_id = self.failure_status_id
if self._expected_failures.failures_were_expected(first_results):
return True
if self._test():
# Only report flaky tests if we were successful at parsing results.json and archiving results.
if first_results and first_results_archive:
self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
return True
second_results = self._delegate.test_results()
if self._results_failed_different_tests(first_results, second_results):
# We could report flaky tests here, but we would need to be careful
# to use similar checks to ExpectedFailures._can_trust_results
# to make sure we don't report constant failures as flakes when
# we happen to hit the --exit-after-N-failures limit.
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
# Archive (and remove) second results so test_results() after
# build_and_test_without_patch won't use second results instead of the clean-tree results.
second_results_archive = self._delegate.archive_last_test_results(self._patch)
if self._build_and_test_without_patch():
# The error from the previous ._test() run is real, report it.
return self.report_failure(first_results_archive, first_results, first_script_error)
clean_tree_results = self._delegate.test_results()
self._expected_failures.update(clean_tree_results)
# Re-check if the original results are now to be expected to avoid a full re-try.
if self._expected_failures.failures_were_expected(first_results):
return True
# Now that we have updated information about failing tests with a clean checkout, we can
# tell if our original failures were unexpected and fail the patch if necessary.
if self._expected_failures.unexpected_failures_observed(first_results):
self.failure_status_id = first_failure_status_id
return self.report_failure(first_results_archive, first_results, first_script_error)
# We don't know what's going on. The tree is likely very red (beyond our layout-test-results
        # failure limit), just keep retrying the patch until someone fixes the tree.
return False
def results_archive_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-usable.
return self._results_archive_from_patch_test_run
def results_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-usable.
return self._results_from_patch_test_run
def report_failure(self, results_archive=None, results=None, script_error=None):
if not self.validate():
return False
self._results_archive_from_patch_test_run = results_archive
self._results_from_patch_test_run = results
raise script_error or self._script_error
def validate(self):
raise NotImplementedError("subclasses must implement")
def run(self):
raise NotImplementedError("subclasses must implement")
| bsd-3-clause | 375,294,841,708,857,340 | 39.952569 | 135 | 0.651868 | false |
baxter-cs/BaxterEPCSWeek1 | studentInfo.py | 1 | 1635 | import random
def main():
students = [
Student("Larsson", 37),
Student("BonJovi", 55),
]
printHeader()
selection = getUserSelection()
if selection == 0:
printStudentsByAge(students)
elif selection == 1:
pass
elif selection == 2:
pass
else:
print "SELECTION NOT RECOGNIZED"
class Student:
def __init__(self, lastName, age):
self.lastName = lastName
self.age = age
self.firstName = "JOHN"
def assignRandomName(self):
pass
def assignRandomAge(self):
self.age = random.randint(0,100)
def assignRandomWeight(self, isMetric):
pass
def assignRandomHeight(self, isMetric):
pass
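# Menu text shown to the user; the number in each entry is the value expected
# back from getUserSelection().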
inputQuestions = [
"For STUDENTS BY AGE, type 0",
"For STUDENTS BY LAST NAME, type 1",
"For STUDENTS BY FIRST NAME, type 3",
"For SUM of STUDENT AGES type 4",
"For AVERAGE of STUDENT AGES type 5",
]
def getUserSelection():
print (inputQuestions[0])
print (inputQuestions[1])
print (inputQuestions[2])
return input("Type selection and press enter:")
def printHeader():
print("HEADER TEXT HERE")
def printStudentsByAge(students):
print ("----Students By Age-----")
sortStudents = sorted(students, key=lambda student: student.age)
  for student in sortStudents:
print student.lastName + ", " + student.firstName + ", " + str(student.age)
def printStudentsByLName(students):
print ("----Students By -----")
def printStudentsByFName(students):
print ("----Students By -----")
def printSumAge(students):
print ("Answer:")
def printAvgAge(students):
print ("Answer:")
def ageRange(studentA, studentB):
  return abs(studentA.age - studentB.age)
main() | mit | -5,100,208,046,927,496,000 | 20.526316 | 79 | 0.670948 | false |
Slezhuk/ansible | lib/ansible/modules/notification/pushover.py | 36 | 3667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Jim Richardson <[email protected]>
# All rights reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
###
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pushover
version_added: "2.0"
short_description: Send notifications via U(https://pushover.net)
description:
- Send notifications via pushover, to subscriber list of devices, and email
addresses. Requires pushover app on devices.
notes:
- You will require a pushover.net account to use this module. But no account
is required to receive messages.
options:
msg:
description:
- What message you wish to send.
required: true
app_token:
description:
- Pushover issued token identifying your pushover app.
required: true
user_key:
description:
- Pushover issued authentication key for your user.
required: true
pri:
description:
- Message priority (see U(https://pushover.net) for details.)
required: false
author: "Jim Richardson (@weaselkeeper)"
'''
EXAMPLES = '''
- pushover:
msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic'
app_token: wxfdksl
user_key: baa5fe97f2c5ab3ca8f0bb59
delegate_to: localhost
'''
import urllib
class Pushover(object):
''' Instantiates a pushover object, use it to send notifications '''
base_uri = 'https://api.pushover.net'
def __init__(self, module, user, token):
self.module = module
self.user = user
self.token = token
def run(self, priority, msg):
        ''' Send the message to the Pushover API and return the response body. '''
url = '%s/1/messages.json' % (self.base_uri)
        # build the POST payload for the Pushover message API
options = dict(user=self.user,
token=self.token,
priority=priority,
message=msg)
data = urllib.urlencode(options)
headers = { "Content-type": "application/x-www-form-urlencoded"}
r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
if info['status'] != 200:
raise Exception(info)
return r.read()
def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True),
app_token=dict(required=True, no_log=True),
user_key=dict(required=True, no_log=True),
pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']),
),
)
msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
try:
response = msg_object.run(module.params['pri'], module.params['msg'])
except:
module.fail_json(msg='Unable to send msg via pushover')
module.exit_json(msg='message sent successfully: %s' % response, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | 1,811,475,203,637,493,800 | 28.813008 | 88 | 0.646578 | false |
jeffzheng1/tensorflow | tensorflow/python/ops/clip_ops.py | 17 | 10072 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
def clip_by_value(t, clip_value_min, clip_value_max,
name=None):
"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
Args:
t: A `Tensor`.
clip_value_min: A 0-D (scalar) `Tensor`. The minimum value to clip by.
clip_value_max: A 0-D (scalar) `Tensor`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_value",
[t, clip_value_min, clip_value_max]) as name:
t = ops.convert_to_tensor(t, name="t")
# Go through list of tensors, for each value in each tensor clip
t_min = math_ops.minimum(t, clip_value_max)
t_max = math_ops.maximum(t_min, clip_value_min, name=name)
return t_max
def clip_by_norm(t, clip_norm, axes=None, name=None):
"""Clips tensor values to a maximum L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
along the dimensions given in `axes`. Specifically, in the default case
where all dimensions are used for calculation, if the L2-norm of `t` is
already less than or equal to `clip_norm`, then `t` is not modified. If
the L2-norm is greater than `clip_norm`, then this operation returns a
tensor of the same type and shape as `t` with its values set to:
`t * clip_norm / l2norm(t)`
In this case, the L2-norm of the output tensor is `clip_norm`.
As another example, if `t` is a matrix and `axes == [1]`, then each row
of the output will have L2-norm equal to `clip_norm`. If `axes == [0]`
instead, each column of the output will be clipped.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
to use for computing the L2-norm. If `None` (the default), uses all
dimensions.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, axes, keep_dims=True))
tclip = array_ops.identity(t * clip_norm * math_ops.minimum(
l2norm_inv, constant_op.constant(1.0, dtype=t.dtype) / clip_norm),
name=name)
return tclip
def global_norm(t_list, name=None):
"""Computes the global norm of multiple tensors.
Given a tuple or list of tensors `t_list`, this operation returns the
global norm of the elements in all tensors in `t_list`. The global norm is
computed as:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
Any entries in `t_list` that are of type None are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
name: A name for the operation (optional).
Returns:
A 0-D (scalar) `Tensor` of type `float`.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
with ops.name_scope(name, "global_norm", t_list) as name:
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
half_squared_norms = []
for v in values:
if v is not None:
with ops.colocate_with(v):
half_squared_norms.append(nn_ops.l2_loss(v))
half_squared_norm = math_ops.reduce_sum(array_ops.pack(half_squared_norms))
norm = math_ops.sqrt(
half_squared_norm *
constant_op.constant(2.0, dtype=half_squared_norm.dtype),
name="global_norm")
return norm
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,
if you've already computed the global norm for `t_list`, you can specify
the global norm with `use_norm`.
To perform the clipping, the values `t_list[i]` are set to:
t_list[i] * clip_norm / max(global_norm, clip_norm)
where:
global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they're all shrunk by the global ratio.
Any of the entries of `t_list` that are of type `None` are ignored.
This is the correct way to perform gradient clipping (for example, see
[Pascanu et al., 2012](http://arxiv.org/abs/1211.5063)
([pdf](http://arxiv.org/pdf/1211.5063.pdf))).
However, it is slower than `clip_by_norm()` because all the parameters must be
ready before the clipping operation can be performed.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
list_clipped: A list of `Tensors` of the same type as `list_t`.
global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = global_norm(t_list, name)
with ops.name_scope(name, "clip_by_global_norm",
t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * math_ops.minimum(
1.0 / use_norm,
constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with ops.colocate_with(v):
values_clipped.append(
array_ops.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
ops.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, ops.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
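# A minimal usage sketch (the variable names below are illustrative, not part of
# this module): gradients are typically clipped before being applied by an
# optimizer.
#
#   grads, variables = zip(*optimizer.compute_gradients(loss))
#   clipped_grads, global_norm = clip_by_global_norm(grads, clip_norm=5.0)
#   train_op = optimizer.apply_gradients(zip(clipped_grads, variables))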
def clip_by_average_norm(t, clip_norm, name=None):
"""Clips tensor values to a maximum average L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its average L2-norm is less than or equal to
`clip_norm`. Specifically, if the average L2-norm is already less than or
equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
greater than `clip_norm`, then this operation returns a tensor of the same
type and shape as `t` with its values set to:
`t * clip_norm / l2norm_avg(t)`
In this case, the average L2-norm of the output tensor is `clip_norm`.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm per element, clip elements by ratio of clip_norm to
# L2-norm per element
n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
tclip = array_ops.identity(
t * clip_norm * math_ops.minimum(
l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
name=name)
return tclip
| apache-2.0 | -4,841,790,955,476,334,000 | 35.759124 | 80 | 0.662927 | false |
openplans/shareabouts-api | src/sa_api_v2/cors/south_migrations/0004__rename_originpermission_to_origin.py | 1 | 5202 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
        # Renaming model 'OriginPermission' to 'Origin'
db.rename_table('cors_originpermission', 'cors_origin')
db.rename_column('cors_originpermission_datasets', 'originpermission_id', 'origin_id')
db.rename_table('cors_originpermission_datasets', 'cors_origin_datasets')
def backwards(self, orm):
        # Renaming model 'Origin' back to 'OriginPermission'
db.rename_table('cors_origin', 'cors_originpermission')
db.rename_column('cors_origin_datasets', 'origin_id', 'originpermission_id')
db.rename_table('cors_origin_datasets', 'cors_originpermission_datasets')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cors.origin': {
'Meta': {'object_name': 'Origin'},
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'origins'", 'blank': 'True', 'to': "orm['sa_api_v2.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'logged_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'pattern': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sa_api_v2.dataset': {
'Meta': {'unique_together': "(('owner', 'slug'),)", 'object_name': 'DataSet', 'db_table': "'sa_api_dataset'"},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '128'})
}
}
complete_apps = ['cors'] | gpl-3.0 | -8,386,165,305,271,780,000 | 65.705128 | 187 | 0.564783 | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Compiler/Naming.py | 93 | 5553 | #
# C naming conventions
#
#
# Prefixes for generating C names.
# Collected here to facilitate ensuring uniqueness.
#
pyrex_prefix = "__pyx_"
codewriter_temp_prefix = pyrex_prefix + "t_"
temp_prefix = u"__cyt_"
builtin_prefix = pyrex_prefix + "builtin_"
arg_prefix = pyrex_prefix + "arg_"
funcdoc_prefix = pyrex_prefix + "doc_"
enum_prefix = pyrex_prefix + "e_"
func_prefix = pyrex_prefix + "f_"
pyfunc_prefix = pyrex_prefix + "pf_"
pywrap_prefix = pyrex_prefix + "pw_"
genbody_prefix = pyrex_prefix + "gb_"
gstab_prefix = pyrex_prefix + "getsets_"
prop_get_prefix = pyrex_prefix + "getprop_"
const_prefix = pyrex_prefix + "k_"
py_const_prefix = pyrex_prefix + "kp_"
label_prefix = pyrex_prefix + "L"
pymethdef_prefix = pyrex_prefix + "mdef_"
methtab_prefix = pyrex_prefix + "methods_"
memtab_prefix = pyrex_prefix + "members_"
objstruct_prefix = pyrex_prefix + "obj_"
typeptr_prefix = pyrex_prefix + "ptype_"
prop_set_prefix = pyrex_prefix + "setprop_"
type_prefix = pyrex_prefix + "t_"
typeobj_prefix = pyrex_prefix + "type_"
var_prefix = pyrex_prefix + "v_"
varptr_prefix = pyrex_prefix + "vp_"
wrapperbase_prefix= pyrex_prefix + "wrapperbase_"
pybuffernd_prefix = pyrex_prefix + "pybuffernd_"
pybufferstruct_prefix = pyrex_prefix + "pybuffer_"
vtable_prefix = pyrex_prefix + "vtable_"
vtabptr_prefix = pyrex_prefix + "vtabptr_"
vtabstruct_prefix = pyrex_prefix + "vtabstruct_"
opt_arg_prefix = pyrex_prefix + "opt_args_"
convert_func_prefix = pyrex_prefix + "convert_"
closure_scope_prefix = pyrex_prefix + "scope_"
closure_class_prefix = pyrex_prefix + "scope_struct_"
lambda_func_prefix = pyrex_prefix + "lambda_"
module_is_main = pyrex_prefix + "module_is_main_"
defaults_struct_prefix = pyrex_prefix + "defaults"
dynamic_args_cname = pyrex_prefix + "dynamic_args"
interned_prefixes = {
'str': pyrex_prefix + "n_",
'int': pyrex_prefix + "int_",
'float': pyrex_prefix + "float_",
'tuple': pyrex_prefix + "tuple_",
'codeobj': pyrex_prefix + "codeobj_",
'slice': pyrex_prefix + "slice_",
'ustring': pyrex_prefix + "ustring_",
}
args_cname = pyrex_prefix + "args"
generator_cname = pyrex_prefix + "generator"
sent_value_cname = pyrex_prefix + "sent_value"
pykwdlist_cname = pyrex_prefix + "pyargnames"
obj_base_cname = pyrex_prefix + "base"
builtins_cname = pyrex_prefix + "b"
preimport_cname = pyrex_prefix + "i"
moddict_cname = pyrex_prefix + "d"
dummy_cname = pyrex_prefix + "dummy"
filename_cname = pyrex_prefix + "filename"
modulename_cname = pyrex_prefix + "modulename"
filetable_cname = pyrex_prefix + "f"
intern_tab_cname = pyrex_prefix + "intern_tab"
kwds_cname = pyrex_prefix + "kwds"
lineno_cname = pyrex_prefix + "lineno"
clineno_cname = pyrex_prefix + "clineno"
cfilenm_cname = pyrex_prefix + "cfilenm"
module_cname = pyrex_prefix + "m"
moddoc_cname = pyrex_prefix + "mdoc"
methtable_cname = pyrex_prefix + "methods"
retval_cname = pyrex_prefix + "r"
reqd_kwds_cname = pyrex_prefix + "reqd_kwds"
self_cname = pyrex_prefix + "self"
stringtab_cname = pyrex_prefix + "string_tab"
vtabslot_cname = pyrex_prefix + "vtab"
c_api_tab_cname = pyrex_prefix + "c_api_tab"
gilstate_cname = pyrex_prefix + "state"
skip_dispatch_cname = pyrex_prefix + "skip_dispatch"
empty_tuple = pyrex_prefix + "empty_tuple"
empty_bytes = pyrex_prefix + "empty_bytes"
print_function = pyrex_prefix + "print"
print_function_kwargs = pyrex_prefix + "print_kwargs"
cleanup_cname = pyrex_prefix + "module_cleanup"
pymoduledef_cname = pyrex_prefix + "moduledef"
optional_args_cname = pyrex_prefix + "optional_args"
import_star = pyrex_prefix + "import_star"
import_star_set = pyrex_prefix + "import_star_set"
outer_scope_cname= pyrex_prefix + "outer_scope"
cur_scope_cname = pyrex_prefix + "cur_scope"
enc_scope_cname = pyrex_prefix + "enc_scope"
frame_cname = pyrex_prefix + "frame"
frame_code_cname = pyrex_prefix + "frame_code"
binding_cfunc = pyrex_prefix + "binding_PyCFunctionType"
fused_func_prefix = pyrex_prefix + 'fuse_'
quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty temping
global_code_object_cache_find = pyrex_prefix + 'find_code_object'
global_code_object_cache_insert = pyrex_prefix + 'insert_code_object'
genexpr_id_ref = 'genexpr'
freelist_name = 'freelist'
freecount_name = 'freecount'
line_c_macro = "__LINE__"
file_c_macro = "__FILE__"
extern_c_macro = pyrex_prefix.upper() + "EXTERN_C"
exc_type_name = pyrex_prefix + "exc_type"
exc_value_name = pyrex_prefix + "exc_value"
exc_tb_name = pyrex_prefix + "exc_tb"
exc_lineno_name = pyrex_prefix + "exc_lineno"
parallel_exc_type = pyrex_prefix + "parallel_exc_type"
parallel_exc_value = pyrex_prefix + "parallel_exc_value"
parallel_exc_tb = pyrex_prefix + "parallel_exc_tb"
parallel_filename = pyrex_prefix + "parallel_filename"
parallel_lineno = pyrex_prefix + "parallel_lineno"
parallel_clineno = pyrex_prefix + "parallel_clineno"
parallel_why = pyrex_prefix + "parallel_why"
exc_vars = (exc_type_name, exc_value_name, exc_tb_name)
api_name = pyrex_prefix + "capi__"
h_guard_prefix = "__PYX_HAVE__"
api_guard_prefix = "__PYX_HAVE_API__"
api_func_guard = "__PYX_HAVE_API_FUNC_"
PYX_NAN = "__PYX_NAN()"
def py_version_hex(major, minor=0, micro=0, release_level=0, release_serial=0):
return (major << 24) | (minor << 16) | (micro << 8) | (release_level << 4) | (release_serial)
| mit | 8,775,358,642,645,349,000 | 36.77551 | 97 | 0.669008 | false |
buckett/sakai-gitflow | reference/library/src/webapp/editor/FCKeditor/editor/filemanager/connectors/py/fckoutput.py | 131 | 4042 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=string.replace):
"""
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
	in HTML as &lt; &gt; and &amp; respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
return text
def convertToXmlAttribute(value):
if (value is None):
value = ""
return escape(value)
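# Mixins shared by the CGI and WSGI connector implementations: HTTP header
# handling, XML response construction, and the HTML snippet returned to the
# editor after a file upload.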
class BaseHttpMixin(object):
def setHttpHeaders(self, content_type='text/xml'):
"Purpose: to prepare the headers for the xml to return"
# Prevent the browser from caching the result.
# Date in the past
self.setHeader('Expires','Mon, 26 Jul 1997 05:00:00 GMT')
# always modified
self.setHeader('Last-Modified',strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
# HTTP/1.1
self.setHeader('Cache-Control','no-store, no-cache, must-revalidate')
self.setHeader('Cache-Control','post-check=0, pre-check=0')
# HTTP/1.0
self.setHeader('Pragma','no-cache')
# Set the response format.
self.setHeader( 'Content-Type', content_type + '; charset=utf-8' )
return
class BaseXmlMixin(object):
def createXmlHeader(self, command, resourceType, currentFolder, url):
"Purpose: returns the xml header"
self.setHttpHeaders()
# Create the XML document header
s = """<?xml version="1.0" encoding="utf-8" ?>"""
# Create the main connector node
s += """<Connector command="%s" resourceType="%s">""" % (
command,
resourceType
)
# Add the current folder node
s += """<CurrentFolder path="%s" url="%s" />""" % (
convertToXmlAttribute(currentFolder),
convertToXmlAttribute(url),
)
return s
def createXmlFooter(self):
"Purpose: returns the xml footer"
return """</Connector>"""
def sendError(self, number, text):
"Purpose: in the event of an error, return an xml based error"
self.setHttpHeaders()
return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
"""<Connector>""" +
self.sendErrorNode (number, text) +
"""</Connector>""" )
def sendErrorNode(self, number, text):
if number != 1:
return """<Error number="%s" />""" % (number)
else:
return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
self.setHttpHeaders("text/html")
"This is the function that sends the results of the uploading process"
"Minified version of the document.domain automatic fix script (#1919)."
"The original script can be found at _dev/domain_fix_template.js"
return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
'errorNumber': errorNo,
'fileUrl': fileUrl.replace ('"', '\\"'),
'fileName': fileName.replace ( '"', '\\"' ) ,
'customMsg': customMsg.replace ( '"', '\\"' ),
}
| apache-2.0 | 2,306,227,955,323,575,000 | 31.966387 | 208 | 0.644483 | false |
aerickson/ansible | lib/ansible/modules/cloud/openstack/os_network.py | 27 | 8686 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
provider_physical_network:
description:
- The physical network where this network object is implemented.
required: false
default: None
version_added: "2.1"
provider_network_type:
description:
- The type of physical network that maps to this network resource.
required: false
default: None
version_added: "2.1"
provider_segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
required: false
default: None
version_added: "2.1"
project:
description:
- Project name or ID containing the network (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
        - Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network:
cloud: mycloud
state: present
name: ext_network
external: true
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: dictionary
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
"provider:physical_network":
description: The physical network where this network object is implemented.
type: string
sample: my_vlan_net
"provider:network_type":
description: The type of physical network that maps to this network resource.
type: string
sample: vlan
"provider:segmentation_id":
description: An isolated segment on the physical network.
type: string
sample: 101
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
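# Ansible entry point: creates or deletes the network described by the module
# parameters using the shade OpenStack client.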
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'),
external=dict(default=False, type='bool'),
provider_physical_network=dict(required=False),
provider_network_type=dict(required=False),
provider_segmentation_id=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['project'] and
StrictVersion(shade.__version__) < StrictVersion('1.6.0')):
module.fail_json(msg="To utilize project, the installed version of"
"the shade library MUST be >=1.6.0")
state = module.params['state']
name = module.params['name']
shared = module.params['shared']
admin_state_up = module.params['admin_state_up']
external = module.params['external']
provider_physical_network = module.params['provider_physical_network']
provider_network_type = module.params['provider_network_type']
provider_segmentation_id = module.params['provider_segmentation_id']
project = module.params.pop('project')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
net = cloud.get_network(name, filters=filters)
if state == 'present':
if not net:
provider = {}
if provider_physical_network:
provider['physical_network'] = provider_physical_network
if provider_network_type:
provider['network_type'] = provider_network_type
if provider_segmentation_id:
provider['segmentation_id'] = provider_segmentation_id
if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
module.fail_json(msg="Shade >= 1.5.0 required to use provider options")
if project_id is not None:
net = cloud.create_network(name, shared, admin_state_up,
external, provider, project_id)
else:
net = cloud.create_network(name, shared, admin_state_up,
external, provider)
changed = True
else:
changed = False
module.exit_json(changed=changed, network=net, id=net['id'])
elif state == 'absent':
if not net:
module.exit_json(changed=False)
else:
cloud.delete_network(name)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 | -1,259,732,425,694,379,800 | 32.797665 | 91 | 0.608796 | false |
adishjain/youtube-dl | youtube_dl/extractor/parliamentliveuk.py | 179 | 1781 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class ParliamentLiveUKIE(InfoExtractor):
IE_NAME = 'parliamentlive.tv'
IE_DESC = 'UK parliament videos'
_VALID_URL = r'https?://www\.parliamentlive\.tv/Main/Player\.aspx\?(?:[^&]+&)*?meetingId=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.parliamentlive.tv/Main/Player.aspx?meetingId=15121&player=windowsmedia',
'info_dict': {
'id': '15121',
'ext': 'asf',
'title': 'hoc home affairs committee, 18 mar 2014.pm',
'description': 'md5:033b3acdf83304cd43946b2d5e5798d1',
},
'params': {
'skip_download': True, # Requires mplayer (mms)
}
}
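    # The player page references an ASX playlist; the stream URL is taken from
    # its first REF entry.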
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
asx_url = self._html_search_regex(
r'embed.*?src="([^"]+)" name="MediaPlayer"', webpage,
'metadata URL')
asx = self._download_xml(asx_url, video_id, 'Downloading ASX metadata')
video_url = asx.find('.//REF').attrib['HREF']
title = self._search_regex(
r'''(?x)player\.setClipDetails\(
(?:(?:[0-9]+|"[^"]+"),\s*){2}
"([^"]+",\s*"[^"]+)"
''',
webpage, 'title').replace('", "', ', ')
description = self._html_search_regex(
r'(?s)<span id="MainContentPlaceHolder_CaptionsBlock_WitnessInfo">(.*?)</span>',
webpage, 'description')
return {
'id': video_id,
'ext': 'asf',
'url': video_url,
'title': title,
'description': description,
}
| unlicense | -6,493,123,807,118,066,000 | 32.603774 | 108 | 0.515441 | false |
eckucukoglu/arm-linux-gnueabihf | arm-linux-gnueabihf/libc/usr/lib/python2.7/lib2to3/fixes/fix_operator.py | 326 | 3472 | """Fixer for operator functions.
operator.isCallable(obj) -> hasattr(obj, '__call__')
operator.sequenceIncludes(obj) -> operator.contains(obj)
operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence)
operator.isMappingType(obj) -> isinstance(obj, collections.Mapping)
operator.isNumberType(obj) -> isinstance(obj, numbers.Number)
operator.repeat(obj, n) -> operator.mul(obj, n)
operator.irepeat(obj, n) -> operator.imul(obj, n)
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, touch_import
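# Decorator that attaches the suggested replacement call pattern to each
# _<name> handler; _check_method() uses it to emit the "You should use ..."
# warning when the function is called without the 'operator.' prefix.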
def invocation(s):
def dec(f):
f.invocation = s
return f
return dec
class FixOperator(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
methods = """
method=('isCallable'|'sequenceIncludes'
|'isSequenceType'|'isMappingType'|'isNumberType'
|'repeat'|'irepeat')
"""
obj = "'(' obj=any ')'"
PATTERN = """
power< module='operator'
trailer< '.' %(methods)s > trailer< %(obj)s > >
|
power< %(methods)s trailer< %(obj)s > >
""" % dict(methods=methods, obj=obj)
def transform(self, node, results):
method = self._check_method(node, results)
if method is not None:
return method(node, results)
@invocation("operator.contains(%s)")
def _sequenceIncludes(self, node, results):
return self._handle_rename(node, results, u"contains")
@invocation("hasattr(%s, '__call__')")
def _isCallable(self, node, results):
obj = results["obj"]
args = [obj.clone(), String(u", "), String(u"'__call__'")]
return Call(Name(u"hasattr"), args, prefix=node.prefix)
@invocation("operator.mul(%s)")
def _repeat(self, node, results):
return self._handle_rename(node, results, u"mul")
@invocation("operator.imul(%s)")
def _irepeat(self, node, results):
return self._handle_rename(node, results, u"imul")
@invocation("isinstance(%s, collections.Sequence)")
def _isSequenceType(self, node, results):
return self._handle_type2abc(node, results, u"collections", u"Sequence")
@invocation("isinstance(%s, collections.Mapping)")
def _isMappingType(self, node, results):
return self._handle_type2abc(node, results, u"collections", u"Mapping")
@invocation("isinstance(%s, numbers.Number)")
def _isNumberType(self, node, results):
return self._handle_type2abc(node, results, u"numbers", u"Number")
def _handle_rename(self, node, results, name):
method = results["method"][0]
method.value = name
method.changed()
def _handle_type2abc(self, node, results, module, abc):
touch_import(None, module, node)
obj = results["obj"]
args = [obj.clone(), String(u", " + u".".join([module, abc]))]
return Call(Name(u"isinstance"), args, prefix=node.prefix)
def _check_method(self, node, results):
method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
if callable(method):
if "module" in results:
return method
else:
sub = (unicode(results["obj"]),)
invocation_str = unicode(method.invocation) % sub
self.warning(node, u"You should use '%s' here." % invocation_str)
return None
| gpl-2.0 | -3,357,940,651,782,782,500 | 35.166667 | 81 | 0.599366 | false |
openstack/swift | test/unit/common/ring/test_builder.py | 1 | 197289 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import mock
import operator
import os
import unittest
import six.moves.cPickle as pickle
from array import array
from collections import Counter, defaultdict
from math import ceil
from tempfile import mkdtemp
from shutil import rmtree
import sys
import random
import uuid
import itertools
from six.moves import range
from swift.common import exceptions
from swift.common import ring
from swift.common.ring import utils
from swift.common.ring.builder import MAX_BALANCE
def _partition_counts(builder, key='id'):
"""
Returns a dictionary mapping the given device key to (number of
partitions assigned to that key).
"""
return Counter(builder.devs[dev_id][key]
for part2dev_id in builder._replica2part2dev
for dev_id in part2dev_id)
class TestRingBuilder(unittest.TestCase):
def setUp(self):
self.testdir = mkdtemp()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _get_population_by_region(self, builder):
"""
Returns a dictionary mapping region to number of partitions in that
region.
"""
return _partition_counts(builder, key='region')
def test_init(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEqual(rb.part_power, 8)
self.assertEqual(rb.replicas, 3)
self.assertEqual(rb.min_part_hours, 1)
self.assertEqual(rb.parts, 2 ** 8)
self.assertEqual(rb.devs, [])
self.assertFalse(rb.devs_changed)
self.assertEqual(rb.version, 0)
self.assertIsNotNone(rb._last_part_moves)
rd = rb.get_ring()
self.assertEqual(rd.devs, [])
self.assertEqual(rd.version, 0)
self.assertIsNone(rd.next_part_power)
self.assertEqual(rd.replica_count, 0)
def test_overlarge_part_powers(self):
expected_msg = 'part_power must be at most 32 (was 33)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(33, 3, 1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_insufficient_replicas(self):
expected_msg = 'replicas must be at least 1 (was 0.999000)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(8, 0.999, 1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_negative_min_part_hours(self):
expected_msg = 'min_part_hours must be non-negative (was -1)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(8, 3, -1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_deepcopy(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdb1'})
# more devices in zone #1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdc1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdd1'})
rb.rebalance()
rb_copy = copy.deepcopy(rb)
self.assertEqual(rb.to_dict(), rb_copy.to_dict())
self.assertIsNot(rb.devs, rb_copy.devs)
self.assertIsNot(rb._replica2part2dev, rb_copy._replica2part2dev)
self.assertIsNot(rb._last_part_moves, rb_copy._last_part_moves)
self.assertIsNot(rb._remove_devs, rb_copy._remove_devs)
self.assertIsNot(rb._dispersion_graph, rb_copy._dispersion_graph)
def test_get_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.remove_dev(1)
rb.rebalance()
r = rb.get_ring()
self.assertIsInstance(r, ring.RingData)
r2 = rb.get_ring()
self.assertIs(r, r2)
rb.rebalance()
r3 = rb.get_ring()
self.assertIsNot(r3, r2)
r4 = rb.get_ring()
self.assertIs(r3, r4)
def test_rebalance_with_seed(self):
devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)]
ring_builders = []
for n in range(3):
rb = ring.RingBuilder(8, 3, 1)
idx = 0
for zone, port in devs:
for d in ('sda1', 'sdb1'):
rb.add_dev({'id': idx, 'region': 0, 'zone': zone,
'ip': '127.0.0.1', 'port': port,
'device': d, 'weight': 1})
idx += 1
ring_builders.append(rb)
rb0 = ring_builders[0]
rb1 = ring_builders[1]
rb2 = ring_builders[2]
r0 = rb0.get_ring()
self.assertIs(rb0.get_ring(), r0)
rb0.rebalance() # NO SEED
rb1.rebalance(seed=10)
rb2.rebalance(seed=10)
r1 = rb1.get_ring()
r2 = rb2.get_ring()
self.assertIsNot(rb0.get_ring(), r0)
self.assertNotEqual(r0.to_dict(), r1.to_dict())
self.assertEqual(r1.to_dict(), r2.to_dict())
# check that random state is reset
pre_state = random.getstate()
rb2.rebalance(seed=10)
self.assertEqual(pre_state, random.getstate(),
"Random state was not reset")
pre_state = random.getstate()
with mock.patch.object(rb2, "_build_replica_plan",
side_effect=Exception()):
self.assertRaises(Exception, rb2.rebalance, seed=10)
self.assertEqual(pre_state, random.getstate(),
"Random state was not reset")
def test_rebalance_part_on_deleted_other_part_on_drained(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.rebalance(seed=1)
# We want a partition where 1 replica is on a removed device, 1
# replica is on a 0-weight device, and 1 on a normal device. To
# guarantee we have one, we see where partition 123 is, then
# manipulate its devices accordingly.
zero_weight_dev_id = rb._replica2part2dev[1][123]
delete_dev_id = rb._replica2part2dev[2][123]
rb.set_dev_weight(zero_weight_dev_id, 0.0)
rb.remove_dev(delete_dev_id)
rb.rebalance()
def test_set_replicas(self):
rb = ring.RingBuilder(8, 3.2, 1)
rb.devs_changed = False
rb.set_replicas(3.25)
self.assertTrue(rb.devs_changed)
rb.devs_changed = False
rb.set_replicas(3.2500001)
self.assertFalse(rb.devs_changed)
def test_add_dev(self):
rb = ring.RingBuilder(8, 3, 1)
dev = {'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}
dev_id = rb.add_dev(dev)
self.assertRaises(exceptions.DuplicateDeviceError, rb.add_dev, dev)
self.assertEqual(dev_id, 0)
rb = ring.RingBuilder(8, 3, 1)
# test add new dev with no id
dev_id = rb.add_dev({'zone': 0, 'region': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 6200,
'device': 'sda2'})
self.assertEqual(rb.devs[0]['id'], 0)
self.assertEqual(dev_id, 0)
# test add another dev with no id
dev_id = rb.add_dev({'zone': 3, 'region': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 6200,
'device': 'sda3'})
self.assertEqual(rb.devs[1]['id'], 1)
self.assertEqual(dev_id, 1)
# some keys are required
self.assertRaises(ValueError, rb.add_dev, {})
stub_dev = {'weight': 1, 'ip': '127.0.0.1', 'port': 7000}
for key in (stub_dev.keys()):
dev = stub_dev.copy()
dev.pop(key)
self.assertRaises(ValueError, rb.add_dev, dev)
def test_set_dev_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256})
rb.set_dev_weight(0, 0.75)
rb.set_dev_weight(1, 0.25)
rb.pretend_min_part_hours_passed()
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 192, 1: 64, 2: 256, 3: 256})
def test_remove_dev(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 192, 1: 192, 2: 192, 3: 192})
rb.remove_dev(1)
rb.pretend_min_part_hours_passed()
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 256, 2: 256, 3: 256})
def test_round_off_error(self):
# 3 nodes with 11 disks each is particularly problematic. Probably has
# to do with the binary repr. of 1/33? Those ones look suspicious...
#
# >>> bin(int(struct.pack('!f', 1.0/(33)).encode('hex'), 16))
# '0b111100111110000011111000010000'
rb = ring.RingBuilder(8, 3, 1)
for dev_id, (region, zone) in enumerate(
11 * [(0, 0), (1, 10), (1, 11)]):
rb.add_dev({'id': dev_id, 'region': region, 'zone': zone,
'weight': 1, 'ip': '127.0.0.1',
'port': 10000 + region * 100 + zone,
'device': 'sda%d' % dev_id})
rb.rebalance()
self.assertEqual(_partition_counts(rb, 'zone'),
{0: 256, 10: 256, 11: 256})
wanted_by_zone = defaultdict(lambda: defaultdict(int))
for dev in rb._iter_devs():
wanted_by_zone[dev['zone']][dev['parts_wanted']] += 1
# We're nicely balanced, but parts_wanted is slightly lumpy
# because reasons.
self.assertEqual(wanted_by_zone, {
0: {0: 10, 1: 1},
10: {0: 11},
11: {0: 10, -1: 1}})
def test_remove_a_lot(self):
rb = ring.RingBuilder(3, 3, 1)
rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2})
rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3})
rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.add_dev({'id': 4, 'device': 'd4', 'ip': '10.0.0.2',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 2})
rb.add_dev({'id': 5, 'device': 'd5', 'ip': '10.0.0.3',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3})
rb.rebalance()
rb.validate()
# this has to put more than 1/3 of the partitions in the
# cluster on removed devices in order to ensure that at least
# one partition has multiple replicas that need to move.
#
# (for an N-replica ring, it's more than 1/N of the
# partitions, of course)
rb.remove_dev(3)
rb.remove_dev(4)
rb.remove_dev(5)
rb.rebalance()
rb.validate()
def test_remove_zero_weighted(self):
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2',
'port': 6202, 'weight': 0.0, 'region': 0, 'zone': 2})
rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 3})
rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1',
'port': 6202, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.rebalance()
rb.remove_dev(1)
parts, balance, removed = rb.rebalance()
self.assertEqual(removed, 1)
def test_shuffled_gather(self):
if self._shuffled_gather_helper() and \
self._shuffled_gather_helper():
raise AssertionError('It is highly likely the ring is no '
'longer shuffling the set of partitions '
'to reassign on a rebalance.')
def _shuffled_gather_helper(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.rebalance()
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
replica_plan = rb._build_replica_plan()
rb._set_parts_wanted(replica_plan)
for dev in rb._iter_devs():
dev['tiers'] = utils.tiers_for_dev(dev)
assign_parts = defaultdict(list)
rb._gather_parts_for_balance(assign_parts, replica_plan, False)
max_run = 0
run = 0
last_part = 0
for part, _ in assign_parts.items():
if part > last_part:
run += 1
else:
if run > max_run:
max_run = run
run = 0
last_part = part
if run > max_run:
max_run = run
return max_run > len(assign_parts) / 2
def test_initial_balance(self):
# 2 boxes, 2 drives each in zone 1
# 1 box, 2 drives in zone 2
#
# This is balanceable, but there used to be some nondeterminism in
# rebalance() that would sometimes give you an imbalanced ring.
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0,
'ip': '10.1.1.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0,
'ip': '10.1.1.3', 'port': 10000, 'device': 'sdb'})
_, balance, _ = rb.rebalance(seed=2)
# maybe not *perfect*, but should be close
self.assertLessEqual(balance, 1)
def test_multitier_partial(self):
# Multitier test, nothing full
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 2, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 3, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['region'][dev['region']] += 1
counts['zone'][dev['zone']] += 1
if any(c > 1 for c in counts['region'].values()):
raise AssertionError(
"Partition %d not evenly region-distributed (got %r)" %
(part, counts['region']))
if any(c > 1 for c in counts['zone'].values()):
raise AssertionError(
"Partition %d not evenly zone-distributed (got %r)" %
(part, counts['zone']))
# Multitier test, zones full, nodes not full
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdi'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
if counts['zone'] != {0: 2, 1: 2, 2: 2}:
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count > 1:
raise AssertionError(
"Partition %d is on device %d more than once (%r)" %
(part, dev_id, counts['dev_id']))
def test_multitier_full(self):
# Multitier test, #replicas == #devs
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
if counts['zone'] != {0: 2, 1: 2, 2: 2}:
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count != 1:
raise AssertionError(
"Partition %d is on device %d %d times, not 1 (%r)" %
(part, dev_id, replica_count, counts['dev_id']))
def test_multitier_overfull(self):
# Multitier test, #replicas > #zones (to prove even distribution)
rb = ring.RingBuilder(8, 8, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
self.assertEqual(8, sum(counts['zone'].values()))
for zone, replica_count in counts['zone'].items():
if replica_count not in (2, 3):
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count not in (1, 2):
raise AssertionError(
"Partition %d is on device %d %d times, "
"not 1 or 2 (%r)" %
(part, dev_id, replica_count, counts['dev_id']))
def test_multitier_expansion_more_devices(self):
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.rebalance()
rb.validate()
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
for _ in range(5):
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = dict(zone=defaultdict(int),
dev_id=defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
self.assertEqual({0: 2, 1: 2, 2: 2}, dict(counts['zone']))
# each part is assigned once to six unique devices
self.assertEqual(list(counts['dev_id'].values()), [1] * 6)
self.assertEqual(len(set(counts['dev_id'].keys())), 6)
def test_multitier_part_moves_with_0_min_part_hours(self):
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.rebalance()
rb.validate()
# min_part_hours is 0, so we're clear to move 2 replicas to
# new devs
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
devs = set()
for replica in range(rb.replicas):
devs.add(rb._replica2part2dev[replica][part])
if len(devs) != 3:
raise AssertionError(
"Partition %d not on 3 devs (got %r)" % (part, devs))
def test_multitier_part_moves_with_positive_min_part_hours(self):
rb = ring.RingBuilder(8, 3, 99)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.rebalance()
rb.validate()
# min_part_hours is >0, so we'll only be able to move 1
# replica to a new home
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
for part in range(rb.parts):
devs = set()
for replica in range(rb.replicas):
devs.add(rb._replica2part2dev[replica][part])
if not any(rb.devs[dev_id]['zone'] == 1 for dev_id in devs):
raise AssertionError(
"Partition %d did not move (got %r)" % (part, devs))
def test_multitier_dont_move_too_many_replicas(self):
rb = ring.RingBuilder(8, 3, 1)
# there'll be at least one replica in z0 and z1
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.rebalance()
rb.validate()
# only 1 replica should move
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
for part in range(rb.parts):
zones = set()
for replica in range(rb.replicas):
zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone'])
if len(zones) != 3:
raise AssertionError(
"Partition %d not in 3 zones (got %r)" % (part, zones))
if 0 not in zones or 1 not in zones:
raise AssertionError(
"Partition %d not in zones 0 and 1 (got %r)" %
(part, zones))
def test_min_part_hours_zero_will_move_one_replica(self):
rb = ring.RingBuilder(8, 3, 0)
# there'll be at least one replica in z0 and z1
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.rebalance(seed=1)
rb.validate()
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'})
rb.rebalance(seed=3)
rb.validate()
self.assertEqual(0, rb.dispersion)
# Only one replica could move, so some zones are quite unbalanced
self.assertAlmostEqual(rb.get_balance(), 66.66, delta=0.5)
# There was only zone 0 and 1 before adding more devices. Only one
# replica should have been moved, therefore we expect 256 parts in zone
# 0 and 1, and a total of 256 in zone 2,3, and 4
expected = defaultdict(int, {0: 256, 1: 256, 2: 86, 3: 85, 4: 85})
self.assertEqual(expected, _partition_counts(rb, key='zone'))
zone_histogram = defaultdict(int)
for part in range(rb.parts):
zones = [
rb.devs[rb._replica2part2dev[replica][part]]['zone']
for replica in range(rb.replicas)]
zone_histogram[tuple(sorted(zones))] += 1
# We expect that every partition moved exactly one replica
expected = {
(0, 1, 2): 86,
(0, 1, 3): 85,
(0, 1, 4): 85,
}
self.assertEqual(zone_histogram, expected)
# After rebalancing one more times, we expect that everything is in a
# good state
rb.rebalance(seed=3)
self.assertEqual(0, rb.dispersion)
# a balance of w/i a 1% isn't too bad for 3 replicas on 7
# devices when part power is only 8
self.assertAlmostEqual(rb.get_balance(), 0, delta=0.5)
# every zone has either 153 or 154 parts
for zone, count in _partition_counts(
rb, key='zone').items():
self.assertAlmostEqual(153.5, count, delta=1)
parts_with_moved_count = defaultdict(int)
for part in range(rb.parts):
zones = set()
for replica in range(rb.replicas):
zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone'])
moved_replicas = len(zones - {0, 1})
parts_with_moved_count[moved_replicas] += 1
# as usual, the real numbers depend on the seed, but we want to
# validate a few things here:
#
# 1) every part had to move one replica to hit dispersion (so no
# one can have a moved count 0)
#
# 2) it's quite reasonable that some small percent of parts will
# have a replica in {0, 1, X} (meaning only one replica of the
# part moved)
#
# 3) when min_part_hours is 0, more than one replica of a part
# can move in a rebalance, and since that movement would get to
# better dispersion faster we expect to observe most parts in
# {[0,1], X, X} (meaning *two* replicas of the part moved)
#
# 4) there's plenty of weight in z0 & z1 to hold a whole
# replicanth, so there is no reason for any part to have to move
# all three replicas out of those zones (meaning no one can have
# a moved count 3)
#
expected = {
1: 52,
2: 204,
}
self.assertEqual(parts_with_moved_count, expected)
def test_ever_rebalanced(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
self.assertFalse(rb.ever_rebalanced)
        builder_file = os.path.join(self.testdir, 'test.builder')
rb.save(builder_file)
rb = ring.RingBuilder.load(builder_file)
self.assertFalse(rb.ever_rebalanced)
rb.rebalance()
self.assertTrue(rb.ever_rebalanced)
rb.save(builder_file)
rb = ring.RingBuilder.load(builder_file)
self.assertTrue(rb.ever_rebalanced)
def test_rerebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
self.assertFalse(rb.ever_rebalanced)
rb.rebalance()
self.assertTrue(rb.ever_rebalanced)
counts = _partition_counts(rb)
self.assertEqual(counts, {0: 256, 1: 256, 2: 256})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
self.assertTrue(rb.ever_rebalanced)
counts = _partition_counts(rb)
self.assertEqual(counts, {0: 192, 1: 192, 2: 192, 3: 192})
rb.set_dev_weight(3, 100)
rb.rebalance()
counts = _partition_counts(rb)
self.assertEqual(counts[3], 256)
def test_add_rebalance_add_rebalance_delete_rebalance(self):
# Test for https://bugs.launchpad.net/swift/+bug/845952
# min_part of 0 to allow for rapid rebalancing
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.remove_dev(1)
# well now we have only one device in z0
rb.set_overload(0.5)
rb.rebalance()
rb.validate()
def test_remove_last_partition_from_zero_weight(self):
rb = ring.RingBuilder(4, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 1, 'weight': 1.0,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 2, 'weight': 1.0,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 0.4,
'ip': '127.0.0.3', 'port': 10001, 'device': 'zero'})
zero_weight_dev = 3
rb.rebalance(seed=1)
# We want at least one partition with replicas only in zone 2 and 3
# due to device weights. It would *like* to spread out into zone 1,
# but can't, due to device weight.
#
# Also, we want such a partition to have a replica on device 3,
# which we will then reduce to zero weight. This should cause the
# removal of the replica from device 3.
#
# Getting this to happen by chance is hard, so let's just set up a
# builder so that it's in the state we want. This is a synthetic
# example; while the bug has happened on a real cluster, that
# builder file had a part_power of 16, so its contents are much too
# big to include here.
rb._replica2part2dev = [
# these are the relevant ones
# | | |
# v v v
array('H', [2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2]),
array('H', [1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4]),
array('H', [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 5, 6, 2, 5, 6])]
# fix up bookkeeping
new_dev_parts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_dev_parts[dev_id] += 1
for dev in rb._iter_devs():
dev['parts'] = new_dev_parts[dev['id']]
rb.set_dev_weight(zero_weight_dev, 0.0)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=1)
node_counts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
node_counts[dev_id] += 1
self.assertEqual(node_counts[zero_weight_dev], 0)
# it's as balanced as it gets, so nothing moves anymore
rb.pretend_min_part_hours_passed()
parts_moved, _balance, _removed = rb.rebalance(seed=1)
new_node_counts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_node_counts[dev_id] += 1
del node_counts[zero_weight_dev]
self.assertEqual(node_counts, new_node_counts)
self.assertEqual(parts_moved, 0)
def test_part_swapping_problem(self):
rb = ring.RingBuilder(4, 3, 1)
# 127.0.0.1 (2 devs)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
# 127.0.0.2 (3 devs)
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
expected = {
'127.0.0.1': 1.2,
'127.0.0.2': 1.7999999999999998,
}
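        # five equal-weight devices share 3 replicanths: the two devices on
        # 127.0.0.1 want 3 * 2 / 5 = 1.2 and the three on 127.0.0.2 want
        # 3 * 3 / 5 = 1.8 (modulo float noise)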
for wr in (rb._build_weighted_replicas_by_tier(),
rb._build_wanted_replicas_by_tier(),
rb._build_target_replicas_by_tier()):
self.assertEqual(expected, {t[-1]: r for (t, r) in
wr.items() if len(t) == 3})
self.assertEqual(rb.get_required_overload(), 0)
rb.rebalance(seed=3)
# so 127.0.0.1 ended up with...
tier = (0, 0, '127.0.0.1')
        # ... 12 parts with only 1 replica
self.assertEqual(rb._dispersion_graph[tier][1], 12)
# ... 4 parts with 2 replicas
self.assertEqual(rb._dispersion_graph[tier][2], 4)
# but since we only have two tiers, this is *totally* dispersed
self.assertEqual(0, rb.dispersion)
# small rings are hard to balance...
expected = {0: 10, 1: 10, 2: 10, 3: 9, 4: 9}
self.assertEqual(expected, {d['id']: d['parts']
for d in rb._iter_devs()})
# everyone wants 9.6 parts
expected = {
0: 4.166666666666671,
1: 4.166666666666671,
2: 4.166666666666671,
3: -6.25,
4: -6.25,
}
self.assertEqual(expected, rb._build_balance_per_dev())
# original sorted _replica2part2dev
"""
rb._replica2part2dev = [
array('H', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]),
array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]),
array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])]
"""
        # now imagine if we came along this _replica2part2dev through no
        # fault of our own; if instead of the 12 parts with only one
        # replica on 127.0.0.1 being split evenly (6 and 6) across devices
        # 0 and 1, device 1 inexplicably had 3 extra parts
rb._replica2part2dev = [
            # these are the relevant ones here
# | | |
# v v v
array('H', [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]),
array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])]
# fix up bookkeeping
new_dev_parts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_dev_parts[dev_id] += 1
for dev in rb._iter_devs():
dev['parts'] = new_dev_parts[dev['id']]
        # reset _last_part_gather_start, otherwise there is a chance it'll
        # unluckily wrap around and try to move one of device 1's parts
        # from replica 2, causing the intermittent failure in bug 1724356
rb._last_part_gather_start = 0
rb.pretend_min_part_hours_passed()
rb.rebalance()
expected = {
0: 4.166666666666671,
1: 4.166666666666671,
2: 4.166666666666671,
3: -6.25,
4: -6.25,
}
self.assertEqual(expected, rb._build_balance_per_dev())
self.assertEqual(rb.get_balance(), 6.25)
def test_wrong_tier_with_no_where_to_go(self):
rb = ring.RingBuilder(4, 3, 1)
# 127.0.0.1 (even devices)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 900,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 900,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 900,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
# 127.0.0.2 (odd devices)
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdd'})
expected = {
'127.0.0.1': 1.75,
'127.0.0.2': 1.25,
}
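        # total weight is 100 + 3 * 900 + 4 * 500 = 4800; 127.0.0.1 holds
        # 2800 of it and so wants 3 * 2800 / 4800 = 1.75 replicanths, while
        # 127.0.0.2 wants 3 * 2000 / 4800 = 1.25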
for wr in (rb._build_weighted_replicas_by_tier(),
rb._build_wanted_replicas_by_tier(),
rb._build_target_replicas_by_tier()):
self.assertEqual(expected, {t[-1]: r for (t, r) in
wr.items() if len(t) == 3})
self.assertEqual(rb.get_required_overload(), 0)
rb.rebalance(seed=3)
# so 127.0.0.1 ended up with...
tier = (0, 0, '127.0.0.1')
# ... 4 parts with 1 replicas
self.assertEqual(rb._dispersion_graph[tier][1], 4)
# ... 12 parts with 2 replicas
self.assertEqual(rb._dispersion_graph[tier][2], 12)
# ... and of course 0 parts with 3 replicas
self.assertEqual(rb._dispersion_graph[tier][3], 0)
# but since we only have two tiers, this is *totally* dispersed
self.assertEqual(0, rb.dispersion)
        # small rings are hard to balance, but it's possible when the
        # part-replicas (3 * 2 ** 4 = 48) divide evenly into the total
        # device weight (4800) like they do here: one part per 100 weight
expected = {
0: 1,
2: 9,
4: 9,
6: 9,
1: 5,
3: 5,
5: 5,
7: 5,
}
self.assertEqual(expected, {d['id']: d['parts']
for d in rb._iter_devs()})
expected = {
0: 0.0,
1: 0.0,
2: 0.0,
3: 0.0,
4: 0.0,
5: 0.0,
6: 0.0,
7: 0.0,
}
self.assertEqual(expected, rb._build_balance_per_dev())
# all devices have exactly the # of parts they want
expected = {
0: 0,
2: 0,
4: 0,
6: 0,
1: 0,
3: 0,
5: 0,
7: 0,
}
self.assertEqual(expected, {d['id']: d['parts_wanted']
for d in rb._iter_devs()})
# original sorted _replica2part2dev
"""
rb._replica2part2dev = [
array('H', [0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, ]),
array('H', [4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1, ]),
array('H', [1, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, ])]
"""
# now imagine if we came along this _replica2part2dev through no
# fault of our own; and device 0 had extra parts, but both
# copies of the other replicas were already in the other tier!
rb._replica2part2dev = [
            # these are the relevant ones here
# | |
# v v
array('H', [2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0]),
array('H', [4, 4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1]),
array('H', [1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7])]
# fix up bookkeeping
new_dev_parts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_dev_parts[dev_id] += 1
for dev in rb._iter_devs():
dev['parts'] = new_dev_parts[dev['id']]
replica_plan = rb._build_replica_plan()
rb._set_parts_wanted(replica_plan)
expected = {
0: -1, # this device wants to shed
2: 0,
4: 0,
6: 0,
1: 0,
3: 1, # there's devices with room on the other server
5: 0,
7: 0,
}
self.assertEqual(expected, {d['id']: d['parts_wanted']
for d in rb._iter_devs()})
self.assertEqual(rb.get_balance(), 100)
rb.pretend_min_part_hours_passed()
        # There's something like an 11% chance that we won't be able to get
        # to a balance of 0 (and a 6% chance that we won't change anything
        # at all). Pick a seed to make this pass.
rb.rebalance(seed=123)
self.assertEqual(rb.get_balance(), 0)
def test_multiple_duplicate_device_assignment(self):
rb = ring.RingBuilder(4, 4, 1)
devs = [
'r1z1-127.0.0.1:6200/d1',
'r1z1-127.0.0.1:6201/d2',
'r1z1-127.0.0.1:6202/d3',
'r1z1-127.0.0.1:33443/d4',
'r1z1-127.0.0.2:6200/d5',
'r1z1-127.0.0.2:6201/d6',
'r1z1-127.0.0.2:6202/d7',
'r1z1-127.0.0.2:6202/d8',
]
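        # parse_add_value expands swift-ring-builder style add values of the
        # form 'r<region>z<zone>-<ip>:<port>/<device>' into device dicts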
for add_value in devs:
dev = utils.parse_add_value(add_value)
dev['weight'] = 1.0
rb.add_dev(dev)
rb.rebalance()
rb._replica2part2dev = [
            # these are the relevant ones here
# | | | | |
# v v v v v
array('H', [0, 1, 2, 3, 3, 0, 0, 0, 4, 6, 4, 4, 4, 4, 4, 4]),
array('H', [0, 1, 3, 1, 1, 1, 1, 1, 5, 7, 5, 5, 5, 5, 5, 5]),
array('H', [0, 1, 2, 2, 2, 2, 2, 2, 4, 6, 6, 6, 6, 6, 6, 6]),
array('H', [0, 3, 2, 3, 3, 3, 3, 3, 5, 7, 7, 7, 7, 7, 7, 7])
# ^
# |
# this sort of thing worked already
]
# fix up bookkeeping
new_dev_parts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_dev_parts[dev_id] += 1
for dev in rb._iter_devs():
dev['parts'] = new_dev_parts[dev['id']]
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
def test_region_fullness_with_balanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 2, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 2, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10006, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 3, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10007, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 3, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10008, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region,
{0: 192, 1: 192, 2: 192, 3: 192})
def test_region_fullness_with_unbalanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 2,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region, {0: 512, 1: 256})
def test_adding_region_slowly_with_unbalanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'})
rb.rebalance(seed=2)
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 0.25,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 0.25,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
changed_parts, _balance, _removed = rb.rebalance(seed=2)
        # there's not enough room in r1 for every partition to have a replica
        # in it, so only 86 assignments occur in r1 (that's ~1/9 of the total,
        # since r1 has 1/9 of the weight).
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region, {0: 682, 1: 86})
        # really 86 parts *should* move (to the new region), but to avoid
        # accidentally picking up too many and causing some parts to randomly
        # flop around devices in the original region, our gather algorithm is
        # conservative: it only picks up parts from devices that are for sure
        # holding more parts than they want (math.ceil() of the replica_plan),
        # which guarantees any parts picked up will have new homes in a better
        # tier or failure_domain.
self.assertEqual(86, changed_parts)
# and since there's not enough room, subsequent rebalances will not
# cause additional assignments to r1
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region, {0: 682, 1: 86})
# after you add more weight, more partition assignments move
rb.set_dev_weight(2, 0.5)
rb.set_dev_weight(3, 0.5)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
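        # r1 now holds 1.0 of the 5.0 total weight, so it can take roughly
        # 768 / 5 ~= 154 part-replicas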
self.assertEqual(population_by_region, {0: 614, 1: 154})
rb.set_dev_weight(2, 1.0)
rb.set_dev_weight(3, 1.0)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region, {0: 512, 1: 256})
def test_avoid_tier_change_new_region(self):
rb = ring.RingBuilder(8, 3, 1)
for i in range(5):
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': i, 'device': 'sda1'})
rb.rebalance(seed=2)
# Add a new device in new region to a balanced ring
rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 0,
'ip': '127.0.0.5', 'port': 10000, 'device': 'sda1'})
# Increase the weight of region 1 slowly
moved_partitions = []
errors = []
for weight in range(0, 101, 10):
rb.set_dev_weight(5, weight)
rb.pretend_min_part_hours_passed()
changed_parts, _balance, _removed = rb.rebalance(seed=2)
rb.validate()
moved_partitions.append(changed_parts)
            # Ensure that the second region has enough partitions;
            # otherwise there would be replicas at risk
min_parts_for_r1 = ceil(weight / (500.0 + weight) * 768)
parts_for_r1 = self._get_population_by_region(rb).get(1, 0)
try:
self.assertEqual(min_parts_for_r1, parts_for_r1)
except AssertionError:
errors.append('weight %s got %s parts but expected %s' % (
weight, parts_for_r1, min_parts_for_r1))
self.assertFalse(errors)
# Number of partitions moved on each rebalance
# 10/510 * 768 ~ 15.06 -> move at least 15 partitions in first step
ref = [0, 16, 14, 14, 13, 13, 13, 12, 11, 12, 10]
self.assertEqual(ref, moved_partitions)
def test_set_replicas_increase(self):
rb = ring.RingBuilder(8, 2, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 2.1
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 25])
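        # with 2.1 replicas the extra replica row only covers about a tenth
        # of the partitions (25 of 256); at 2.2 below it covers about a
        # fifth (51 of 256)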
rb.replicas = 2.2
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 51])
def test_set_replicas_decrease(self):
rb = ring.RingBuilder(4, 5, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 4.9
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 16, 16, 14])
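        # at 4.9 replicas the last replica row shrinks to cover 14 of the
        # 16 partitions; dropping to 2.5 below removes two whole replica
        # rows and leaves a half-length one (8 of 16)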
# cross a couple of integer thresholds (4 and 3)
rb.replicas = 2.5
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 8])
def test_fractional_replicas_rebalance(self):
rb = ring.RingBuilder(8, 2.5, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance() # passes by not crashing
rb.validate() # also passes by not crashing
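        # the half replica shows up as a third replica row covering only
        # 128 of the 256 partitions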
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 128])
def test_create_add_dev_add_replica_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.set_replicas(4)
rb.rebalance() # this would crash since parts_wanted was not set
rb.validate()
def test_reduce_replicas_after_remove_device(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.rebalance()
rb.remove_dev(0)
self.assertRaises(exceptions.RingValidationError, rb.rebalance)
rb.set_replicas(2)
rb.rebalance()
rb.validate()
def test_rebalance_post_upgrade(self):
rb = ring.RingBuilder(8, 3, 1)
# 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
# which forces some rounding to happen.
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.rebalance()
rb.validate()
# Older versions of the ring builder code would round down when
# computing parts_wanted, while the new code rounds up. Make sure we
# can handle a ring built by the old method.
#
# This code mimics the old _set_parts_wanted.
weight_of_one_part = rb.weight_of_one_part()
for dev in rb._iter_devs():
if not dev['weight']:
dev['parts_wanted'] = -rb.parts * rb.replicas
else:
dev['parts_wanted'] = (
int(weight_of_one_part * dev['weight']) -
dev['parts'])
rb.pretend_min_part_hours_passed()
rb.rebalance() # this crashes unless rebalance resets parts_wanted
rb.validate()
def test_add_replicas_then_rebalance_respects_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'})
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 96, 1: 96,
2: 32, 3: 32,
4: 96, 5: 96,
6: 32, 7: 32,
8: 96, 9: 96,
10: 32, 11: 32})
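        # 768 part-replicas over 24 total weight: each weight-3 device gets
        # 96 parts and each weight-1 device gets 32; doubling the replica
        # count below should simply double those counts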
rb.replicas *= 2
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 192, 1: 192,
2: 64, 3: 64,
4: 192, 5: 192,
6: 64, 7: 64,
8: 192, 9: 192,
10: 64, 11: 64})
def test_overload(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'})
rb.rebalance(seed=12345)
rb.validate()
        # sanity check: with no overload set (the default), placement simply
        # follows the device weights
part_counts = _partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 192)
self.assertEqual(part_counts[1], 192)
self.assertEqual(part_counts[2], 384)
        # The lighter zones (0 and 1) take 10% more than their fair shares
        # by weight since overload is 10% (0.1).
rb.set_overload(0.1)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb, key='zone')
self.assertEqual({0: 212, 1: 211, 2: 345}, part_counts)
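        # each lighter zone's fair share is 192, so 10% overload lets it
        # grow to about 192 * 1.1 ~= 211, leaving 345 parts for zone 2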
        # Now, zones 0 and 1 take 50% more than their fair shares by
        # weight.
rb.set_overload(0.5)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb, key='zone')
self.assertEqual({0: 256, 1: 256, 2: 256}, part_counts)
        # Zones 0 and 1 may take up to 75% over their fair share, but the
        # placement algorithm only wants to spread things out evenly between
        # all drives, so they stay at 50% more.
rb.set_overload(0.75)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 256)
self.assertEqual(part_counts[1], 256)
self.assertEqual(part_counts[2], 256)
def test_unoverload(self):
# Start off needing overload to balance, then add capacity until we
# don't need overload any more and see that things still balance.
# Overload doesn't prevent optimal balancing.
rb = ring.RingBuilder(8, 3, 1)
rb.set_overload(0.125)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.rebalance(seed=12345)
# sanity check: our overload is big enough to balance things
part_counts = _partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 216)
self.assertEqual(part_counts['127.0.0.2'], 216)
self.assertEqual(part_counts['127.0.0.3'], 336)
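        # fair shares by weight would be 192 / 192 / 384; the 12.5% overload
        # lets 127.0.0.1 and 127.0.0.2 grow to 192 * 1.125 = 216 each,
        # leaving 336 parts on 127.0.0.3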
# Add some weight: balance improves
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 1.22)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb, key='ip')
self.assertEqual({
'127.0.0.1': 237,
'127.0.0.2': 237,
'127.0.0.3': 294,
}, part_counts)
# Even out the weights: balance becomes perfect
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 2)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 256)
self.assertEqual(part_counts['127.0.0.2'], 256)
self.assertEqual(part_counts['127.0.0.3'], 256)
# Add a new server: balance stays optimal
rb.add_dev({'id': 12, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 13, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 14, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 15, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
# we're moving more than 1/3 of the replicas but fewer than 2/3, so
# we have to do this twice
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
expected = {
'127.0.0.1': 192,
'127.0.0.2': 192,
'127.0.0.3': 192,
'127.0.0.4': 192,
}
part_counts = _partition_counts(rb, key='ip')
self.assertEqual(part_counts, expected)
def test_overload_keeps_balanceable_things_balanced_initially(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb)
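        # even with an absurd overload, a balanceable ring stays weight-
        # proportional: total weight is 48, so each weight-8 device gets
        # 768 * 8 / 48 = 128 parts and each weight-4 device gets 64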
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_overload_keeps_balanceable_things_balanced_on_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=123)
part_counts = _partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
# swap weights between 10.0.0.1 and 10.0.0.2
rb.set_dev_weight(0, 4)
rb.set_dev_weight(1, 4)
rb.set_dev_weight(2, 8)
        rb.set_dev_weight(3, 8)
rb.rebalance(seed=456)
part_counts = _partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_server_per_port(self):
# 3 servers, 3 disks each, with each disk on its own port
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'})
rb.rebalance(seed=1)
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'})
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=1)
poorly_dispersed = []
for part in range(rb.parts):
on_nodes = set()
for replica in range(rb.replicas):
dev_id = rb._replica2part2dev[replica][part]
on_nodes.add(rb.devs[dev_id]['ip'])
if len(on_nodes) < rb.replicas:
poorly_dispersed.append(part)
self.assertEqual(poorly_dispersed, [])
def test_load(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
real_pickle = pickle.load
fake_open = mock.mock_open()
io_error_not_found = IOError()
io_error_not_found.errno = errno.ENOENT
io_error_no_perm = IOError()
io_error_no_perm.errno = errno.EPERM
io_error_generic = IOError()
io_error_generic.errno = errno.EOPNOTSUPP
try:
# test a legit builder
fake_pickle = mock.Mock(return_value=rb)
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
self.assertEqual(fake_pickle.call_count, 1)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEqual(builder, rb)
fake_pickle.reset_mock()
# test old style builder
fake_pickle.return_value = rb.to_dict()
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEqual(builder.devs, rb.devs)
fake_pickle.reset_mock()
# test old devs but no meta
no_meta_builder = rb
for dev in no_meta_builder.devs:
del(dev['meta'])
fake_pickle.return_value = no_meta_builder
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEqual(builder.devs, rb.devs)
# test an empty builder
fake_pickle.side_effect = EOFError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
# test a corrupted builder
fake_pickle.side_effect = pickle.UnpicklingError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
# test some error
fake_pickle.side_effect = AttributeError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
finally:
pickle.load = real_pickle
        # test a non-existent builder file
fake_open.side_effect = io_error_not_found
self.assertRaises(exceptions.FileNotFoundError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
        # test an inaccessible builder file
fake_open.side_effect = io_error_no_perm
self.assertRaises(exceptions.PermissionError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
        # test an error other than ENOENT and EPERM
fake_open.side_effect = io_error_generic
self.assertRaises(IOError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
def test_save_load(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000,
'replication_ip': '127.0.0.0', 'replication_port': 10000,
'device': 'sda1', 'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001,
'replication_ip': '127.0.0.1', 'replication_port': 10001,
'device': 'sdb1', 'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002,
'replication_ip': '127.0.0.2', 'replication_port': 10002,
'device': 'sdc1', 'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003,
'replication_ip': '127.0.0.3', 'replication_port': 10003,
'device': 'sdd1', 'meta': ''}]
rb.set_overload(3.14159)
for d in devs:
rb.add_dev(d)
rb.rebalance()
builder_file = os.path.join(self.testdir, 'test_save.builder')
rb.save(builder_file)
loaded_rb = ring.RingBuilder.load(builder_file)
self.maxDiff = None
self.assertEqual(loaded_rb.to_dict(), rb.to_dict())
self.assertEqual(loaded_rb.overload, 3.14159)
@mock.patch('six.moves.builtins.open', autospec=True)
@mock.patch('swift.common.ring.builder.pickle.dump', autospec=True)
def test_save(self, mock_pickle_dump, mock_open):
mock_open.return_value = mock_fh = mock.MagicMock()
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
rb.save('some.builder')
mock_open.assert_called_once_with('some.builder', 'wb')
mock_pickle_dump.assert_called_once_with(rb.to_dict(),
mock_fh.__enter__(),
protocol=2)
def test_id(self):
rb = ring.RingBuilder(8, 3, 1)
# check id is assigned after save
builder_file = os.path.join(self.testdir, 'test_save.builder')
rb.save(builder_file)
assigned_id = rb.id
# check id doesn't change when builder is saved again
rb.save(builder_file)
self.assertEqual(assigned_id, rb.id)
# check same id after loading
loaded_rb = ring.RingBuilder.load(builder_file)
self.assertEqual(assigned_id, loaded_rb.id)
# check id doesn't change when loaded builder is saved
rb.save(builder_file)
self.assertEqual(assigned_id, rb.id)
# check same id after loading again
loaded_rb = ring.RingBuilder.load(builder_file)
self.assertEqual(assigned_id, loaded_rb.id)
# check id remains once assigned, even when save fails
with self.assertRaises(IOError):
rb.save(os.path.join(
self.testdir, 'non_existent_dir', 'test_save.file'))
self.assertEqual(assigned_id, rb.id)
# sanity check that different builders get different id's
other_rb = ring.RingBuilder(8, 3, 1)
other_builder_file = os.path.join(self.testdir, 'test_save_2.builder')
other_rb.save(other_builder_file)
self.assertNotEqual(assigned_id, other_rb.id)
def test_id_copy_from(self):
# copy_from preserves the same id
orig_rb = ring.RingBuilder(8, 3, 1)
copy_rb = ring.RingBuilder(8, 3, 1)
copy_rb.copy_from(orig_rb)
for rb in (orig_rb, copy_rb):
with self.assertRaises(AttributeError) as cm:
rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
builder_file = os.path.join(self.testdir, 'test_save.builder')
orig_rb.save(builder_file)
copy_rb = ring.RingBuilder(8, 3, 1)
copy_rb.copy_from(orig_rb)
self.assertEqual(orig_rb.id, copy_rb.id)
def test_id_legacy_builder_file(self):
builder_file = os.path.join(self.testdir, 'legacy.builder')
def do_test():
# load legacy file
loaded_rb = ring.RingBuilder.load(builder_file)
with self.assertRaises(AttributeError) as cm:
loaded_rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
# check saving assigns an id, and that it is persisted
loaded_rb.save(builder_file)
assigned_id = loaded_rb.id
self.assertIsNotNone(assigned_id)
loaded_rb = ring.RingBuilder.load(builder_file)
self.assertEqual(assigned_id, loaded_rb.id)
# older builders had no id so the pickled builder dict had no id key
rb = ring.RingBuilder(8, 3, 1)
orig_to_dict = rb.to_dict
def mock_to_dict():
result = orig_to_dict()
result.pop('id')
return result
with mock.patch.object(rb, 'to_dict', mock_to_dict):
rb.save(builder_file)
do_test()
# even older builders pickled the class instance, which would have had
# no _id attribute
rb = ring.RingBuilder(8, 3, 1)
del rb.logger # logger type cannot be pickled
del rb._id
builder_file = os.path.join(self.testdir, 'legacy.builder')
with open(builder_file, 'wb') as f:
pickle.dump(rb, f, protocol=2)
do_test()
def test_id_not_initialised_errors(self):
rb = ring.RingBuilder(8, 3, 1)
# id is not set until builder has been saved
with self.assertRaises(AttributeError) as cm:
rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
# save must succeed for id to be assigned
with self.assertRaises(IOError):
rb.save(os.path.join(
self.testdir, 'non-existent-dir', 'foo.builder'))
with self.assertRaises(AttributeError) as cm:
rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
def test_search_devs(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 1, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 1, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1',
'meta': 'meta3'},
{'id': 4, 'region': 2, 'zone': 4, 'weight': 1,
'ip': '127.0.0.4', 'port': 10004, 'device': 'sde1',
'meta': 'meta4', 'replication_ip': '127.0.0.10',
'replication_port': 20000},
{'id': 5, 'region': 2, 'zone': 5, 'weight': 2,
'ip': '127.0.0.5', 'port': 10005, 'device': 'sdf1',
'meta': 'meta5', 'replication_ip': '127.0.0.11',
'replication_port': 20001},
{'id': 6, 'region': 2, 'zone': 6, 'weight': 2,
'ip': '127.0.0.6', 'port': 10006, 'device': 'sdg1',
'meta': 'meta6', 'replication_ip': '127.0.0.12',
'replication_port': 20002}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
res = rb.search_devs({'region': 0})
self.assertEqual(res, [devs[0], devs[1]])
res = rb.search_devs({'region': 1})
self.assertEqual(res, [devs[2], devs[3]])
res = rb.search_devs({'region': 1, 'zone': 2})
self.assertEqual(res, [devs[2]])
res = rb.search_devs({'id': 1})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'zone': 1})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'ip': '127.0.0.1'})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'ip': '127.0.0.1', 'port': 10001})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'port': 10001})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'replication_ip': '127.0.0.10'})
self.assertEqual(res, [devs[4]])
res = rb.search_devs({'replication_ip': '127.0.0.10',
'replication_port': 20000})
self.assertEqual(res, [devs[4]])
res = rb.search_devs({'replication_port': 20000})
self.assertEqual(res, [devs[4]])
res = rb.search_devs({'device': 'sdb1'})
self.assertEqual(res, [devs[1]])
res = rb.search_devs({'meta': 'meta1'})
self.assertEqual(res, [devs[1]])
def test_validate(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 12, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 13, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 14, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 15, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
# Degenerate case: devices added but not rebalanced yet
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.rebalance()
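        # 3 replicas * 256 parts = 768 part-replicas; zones 0 and 1 have
        # weight 4 of 24 (768 * 4 / 24 = 128) while zones 2 and 3 have
        # weight 8 (768 * 8 / 24 = 256)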
counts = _partition_counts(rb, key='zone')
self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256})
dev_usage, worst = rb.validate()
self.assertIsNone(dev_usage)
self.assertIsNone(worst)
dev_usage, worst = rb.validate(stats=True)
self.assertEqual(list(dev_usage), [32, 32, 64, 64,
32, 32, 32, # added zone0
32, 32, 32, # added zone1
64, 64, 64, # added zone2
64, 64, 64, # added zone3
])
self.assertEqual(int(worst), 0)
# min part hours should pin all the parts assigned to this zero
# weight device onto it such that the balance will look horrible
rb.set_dev_weight(2, 0)
rb.rebalance()
self.assertEqual(rb.validate(stats=True)[1], MAX_BALANCE)
# Test not all partitions doubly accounted for
rb.devs[1]['parts'] -= 1
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.devs[1]['parts'] += 1
# Test non-numeric port
rb.devs[1]['port'] = '10001'
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.devs[1]['port'] = 10001
# Test partition on nonexistent device
rb.pretend_min_part_hours_passed()
orig_dev_id = rb._replica2part2dev[0][0]
rb._replica2part2dev[0][0] = len(rb.devs)
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb._replica2part2dev[0][0] = orig_dev_id
# Tests that validate can handle 'holes' in .devs
rb.remove_dev(2)
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate(stats=True)
# Test partition assigned to a hole
if rb.devs[2]:
rb.remove_dev(2)
rb.pretend_min_part_hours_passed()
orig_dev_id = rb._replica2part2dev[0][0]
rb._replica2part2dev[0][0] = 2
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb._replica2part2dev[0][0] = orig_dev_id
        # Validate that zero weight devices with no partitions don't count
        # toward the 'worst' value.
self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)
rb.add_dev({'id': 16, 'region': 0, 'zone': 0, 'weight': 0,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)
def test_validate_partial_replica(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
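        # with 2.5 replicas the first two replica rows cover all 256
        # partitions, while the third row covers only half of them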
self.assertEqual(len(rb._replica2part2dev[0]), 256)
self.assertEqual(len(rb._replica2part2dev[1]), 256)
self.assertEqual(len(rb._replica2part2dev[2]), 128)
# now swap partial replica part maps
rb._replica2part2dev[1], rb._replica2part2dev[2] = \
rb._replica2part2dev[2], rb._replica2part2dev[1]
self.assertRaises(exceptions.RingValidationError, rb.validate)
def test_validate_duplicate_part_assignment(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
# now double up a device assignment
rb._replica2part2dev[1][200] = rb._replica2part2dev[2][200]
with self.assertRaises(exceptions.RingValidationError) as e:
rb.validate()
expected = 'The partition 200 has been assigned to duplicate devices'
self.assertIn(expected, str(e.exception))
def test_get_part_devices(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEqual(rb.get_part_devices(0), [])
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
part_devs = sorted(rb.get_part_devices(0),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[0], rb.devs[1], rb.devs[2]])
def test_get_part_devices_partial_replicas(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance(seed=4)
# note: partition 255 will only have 2 replicas
part_devs = sorted(rb.get_part_devices(255),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[1], rb.devs[2]])
def test_dispersion_with_zero_weight_devices(self):
rb = ring.RingBuilder(8, 3.0, 0)
# add two devices to a single server in a single zone
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
# and a zero weight device
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance()
self.assertEqual(rb.dispersion, 0.0)
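        # each key is a tier tuple; each value is a histogram whose entry i
        # counts the partitions with exactly i replicas in that tier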
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 2): [0, 256, 0, 0],
})
def test_dispersion_with_zero_weight_devices_with_parts(self):
rb = ring.RingBuilder(8, 3.0, 1)
# add four devices to a single server in a single zone
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance(seed=1)
self.assertEqual(rb.dispersion, 0.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
})
        # now mark device 2 for decom
rb.set_dev_weight(2, 0.0)
# we'll rebalance but can't move any parts
rb.rebalance(seed=1)
# zero weight tier has one copy of 1/4 part-replica
self.assertEqual(rb.dispersion, 25.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
})
# unlock the stuck parts
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=3)
self.assertEqual(rb.dispersion, 0.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 3): [0, 256, 0, 0],
})
@unittest.skipIf(sys.version_info >= (3,),
"Seed-specific tests don't work well on py3")
def test_undispersable_zone_converge_on_balance(self):
rb = ring.RingBuilder(8, 6, 0)
dev_id = 0
        # 3 regions, 2 zones in each region, 1 server with only *one* device
        # in each zone (this is an absolutely pathological case)
for r in range(3):
for z in range(2):
ip = '127.%s.%s.1' % (r, z)
dev_id += 1
rb.add_dev({'id': dev_id, 'region': r, 'zone': z,
'weight': 1000, 'ip': ip, 'port': 10000,
'device': 'd%s' % dev_id})
rb.rebalance(seed=7)
# sanity, all balanced and 0 dispersion
self.assertEqual(rb.get_balance(), 0)
self.assertEqual(rb.dispersion, 0)
# add one device to the server in z1 for each region, N.B. when we
# *balance* this topology we will have very bad dispersion (too much
# weight in z1 compared to z2!)
for r in range(3):
z = 0
ip = '127.%s.%s.1' % (r, z)
dev_id += 1
rb.add_dev({'id': dev_id, 'region': r, 'zone': z,
'weight': 1000, 'ip': ip, 'port': 10000,
'device': 'd%s' % dev_id})
changed_part, _, _ = rb.rebalance(seed=7)
        # sanity: every part moved exactly one replica to the new devices
self.assertEqual(changed_part, 2 ** 8)
        # so the first time, the ring is still unbalanced because we'll only
        # move one replica of each part.
self.assertEqual(rb.get_balance(), 50.1953125)
self.assertEqual(rb.dispersion, 16.6015625)
# N.B. since we mostly end up grabbing parts by "weight forced" some
# seeds given some specific ring state will randomly pick bad
# part-replicas that end up going back down onto the same devices
changed_part, _, _ = rb.rebalance(seed=7)
self.assertEqual(changed_part, 14)
        # ... this isn't really a "desirable" behavior, but even with bad
        # luck, things do get better
self.assertEqual(rb.get_balance(), 47.265625)
self.assertEqual(rb.dispersion, 16.6015625)
        # but if you stick with it, eventually the next rebalance will get to
        # move "the right" part-replicas, resulting in near-optimal balance
changed_part, _, _ = rb.rebalance(seed=7)
self.assertEqual(changed_part, 240)
self.assertEqual(rb.get_balance(), 0.390625)
self.assertEqual(rb.dispersion, 16.6015625)
@unittest.skipIf(sys.version_info >= (3,),
"Seed-specific tests don't work well on py3")
def test_undispersable_server_converge_on_balance(self):
rb = ring.RingBuilder(8, 6, 0)
dev_id = 0
        # 3 zones, 2 servers in each zone, 2 devices on each server
for z in range(3):
for i in range(2):
ip = '127.0.%s.%s' % (z, i + 1)
for d in range(2):
dev_id += 1
rb.add_dev({'id': dev_id, 'region': 1, 'zone': z,
'weight': 1000, 'ip': ip, 'port': 10000,
'device': 'd%s' % dev_id})
rb.rebalance(seed=7)
# sanity, all balanced and 0 dispersion
self.assertEqual(rb.get_balance(), 0)
self.assertEqual(rb.dispersion, 0)
# add one device for first server for each zone
for z in range(3):
ip = '127.0.%s.1' % z
dev_id += 1
rb.add_dev({'id': dev_id, 'region': 1, 'zone': z,
'weight': 1000, 'ip': ip, 'port': 10000,
'device': 'd%s' % dev_id})
changed_part, _, _ = rb.rebalance(seed=7)
        # sanity: every part moved exactly one replica to the new devices
self.assertEqual(changed_part, 2 ** 8)
        # but the first time, things are still unbalanced because the ring
        # builder can move only one replica of each part
self.assertEqual(rb.get_balance(), 16.9921875)
self.assertEqual(rb.dispersion, 9.9609375)
rb.rebalance(seed=7)
        # balance converges to somewhere between 0 and 1
self.assertGreaterEqual(rb.get_balance(), 0)
self.assertLess(rb.get_balance(), 1)
# dispersion doesn't get any worse
self.assertEqual(rb.dispersion, 9.9609375)
def test_effective_overload(self):
rb = ring.RingBuilder(8, 3, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
# z2
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
# this ring requires overload
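        # zone 2 holds only 200 of the 800 total weight, so holding a whole
        # replica there means each of its two devices must take roughly 0.5
        # replicanths instead of the 0.375 its weight alone would earn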
required = rb.get_required_overload()
self.assertGreater(required, 0.1)
# and we'll use a little bit
rb.set_overload(0.1)
rb.rebalance(seed=7)
rb.validate()
        # but without enough overload we're not fully dispersed
self.assertGreater(rb.dispersion, 0)
# add the other dev to z2
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
        # but also fail another device in the same zone!
rb.remove_dev(6)
# we still require overload
required = rb.get_required_overload()
self.assertGreater(required, 0.1)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=7)
rb.validate()
        # ... and without enough overload we're still not fully dispersed
self.assertGreater(rb.dispersion, 0)
# ok, let's fix z2's weight for real
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
# ... technically, we no longer require overload
self.assertEqual(rb.get_required_overload(), 0.0)
# so let's rebalance w/o resetting min_part_hours
rb.rebalance(seed=7)
rb.validate()
# ... and that got it in one pass boo-yah!
self.assertEqual(rb.dispersion, 0)
def zone_weights_over_device_count(self):
rb = ring.RingBuilder(8, 3, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
# z1
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
# z2
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 200,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.rebalance(seed=7)
rb.validate()
self.assertEqual(rb.dispersion, 0)
self.assertAlmostEqual(rb.get_balance(), (1.0 / 3.0) * 100)
def test_more_devices_than_replicas_validation_when_removed_dev(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sdc'})
rb.rebalance()
rb.remove_dev(2)
with self.assertRaises(ValueError) as e:
rb.set_dev_weight(2, 1)
msg = "Can not set weight of dev_id 2 because it is marked " \
"for removal"
self.assertIn(msg, str(e.exception))
with self.assertRaises(exceptions.RingValidationError) as e:
rb.rebalance()
msg = 'Replica count of 3 requires more than 2 devices'
self.assertIn(msg, str(e.exception))
def _add_dev_delete_first_n(self, add_dev_count, n):
rb = ring.RingBuilder(8, 3, 1)
dev_names = ['sda', 'sdb', 'sdc', 'sdd', 'sde', 'sdf']
for i in range(add_dev_count):
if i < len(dev_names):
dev_name = dev_names[i]
else:
dev_name = 'sda'
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': dev_name})
rb.rebalance()
if (n > 0):
rb.pretend_min_part_hours_passed()
# remove first n
for i in range(n):
rb.remove_dev(i)
rb.pretend_min_part_hours_passed()
rb.rebalance()
return rb
def test_reuse_of_dev_holes_without_id(self):
# try with contiguous holes at beginning
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0,
'device': 'sda'})
self.assertLess(new_dev_id, add_dev_count)
# try with non-contiguous holes
# [0, 1, None, 3, 4, None]
rb2 = ring.RingBuilder(8, 3, 1)
for i in range(6):
rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
rb2.rebalance()
rb2.pretend_min_part_hours_passed()
rb2.remove_dev(2)
rb2.remove_dev(5)
rb2.pretend_min_part_hours_passed()
rb2.rebalance()
first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
# add a new one (without reusing a hole)
third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
self.assertEqual(first, 2)
self.assertEqual(second, 5)
self.assertEqual(third, 6)
def test_reuse_of_dev_holes_with_id(self):
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
# add specifying id
exp_new_dev_id = 2
# [dev, dev, None, dev, dev, None]
try:
new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0,
'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0,
'device': 'sda'})
self.assertEqual(new_dev_id, exp_new_dev_id)
except exceptions.DuplicateDeviceError:
self.fail("device hole not reused")
def test_prepare_increase_partition_power(self):
ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
self.assertFalse(rb.cancel_increase_partition_power())
self.assertEqual(rb.part_power, 8)
self.assertIsNone(rb.next_part_power)
self.assertFalse(rb.finish_increase_partition_power())
self.assertEqual(rb.part_power, 8)
self.assertIsNone(rb.next_part_power)
self.assertTrue(rb.prepare_increase_partition_power())
self.assertEqual(rb.part_power, 8)
self.assertEqual(rb.next_part_power, 9)
# Save .ring.gz, and load ring from it to ensure prev/next is set
rd = rb.get_ring()
rd.save(ring_file)
r = ring.Ring(ring_file)
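        # a ring maps the top 4 bytes of a path's md5 to a partition by
        # shifting right by (32 - part_power) bits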
expected_part_shift = 32 - 8
self.assertEqual(expected_part_shift, r._part_shift)
self.assertEqual(9, r.next_part_power)
def test_increase_partition_power(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
# Let's save the ring, and get the nodes for an object
ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
rd = rb.get_ring()
rd.save(ring_file)
r = ring.Ring(ring_file)
old_part, old_nodes = r.get_nodes("acc", "cont", "obj")
old_version = rb.version
self.assertTrue(rb.prepare_increase_partition_power())
self.assertTrue(rb.increase_partition_power())
rb.validate()
changed_parts, _balance, removed_devs = rb.rebalance()
self.assertEqual(changed_parts, 0)
self.assertEqual(removed_devs, 0)
# Make sure cancellation is not possible
# after increasing the partition power
self.assertFalse(rb.cancel_increase_partition_power())
old_ring = r
rd = rb.get_ring()
rd.save(ring_file)
r = ring.Ring(ring_file)
new_part, new_nodes = r.get_nodes("acc", "cont", "obj")
# sanity checks
self.assertEqual(9, rb.part_power)
self.assertEqual(9, rb.next_part_power)
self.assertEqual(rb.version, old_version + 3)
# make sure there is always the same device assigned to every pair of
# partitions
for replica in rb._replica2part2dev:
for part in range(0, len(replica), 2):
dev = replica[part]
next_dev = replica[part + 1]
self.assertEqual(dev, next_dev)
# same for last_part moves
for part in range(0, rb.parts, 2):
this_last_moved = rb._last_part_moves[part]
next_last_moved = rb._last_part_moves[part + 1]
self.assertEqual(this_last_moved, next_last_moved)
for i in range(100):
suffix = uuid.uuid4()
account = 'account_%s' % suffix
container = 'container_%s' % suffix
obj = 'obj_%s' % suffix
old_part, old_nodes = old_ring.get_nodes(account, container, obj)
new_part, new_nodes = r.get_nodes(account, container, obj)
# Due to the increased partition power, the partition each object
# is assigned to has changed. If the old partition was X, it will
# now be either located in 2*X or 2*X+1
self.assertIn(new_part, [old_part * 2, old_part * 2 + 1])
# Importantly, we expect the objects to be placed on the same
# nodes after increasing the partition power
self.assertEqual(old_nodes, new_nodes)
def test_finalize_increase_partition_power(self):
ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
self.assertTrue(rb.prepare_increase_partition_power())
# Make sure this doesn't do any harm before actually increasing the
# partition power
self.assertFalse(rb.finish_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
self.assertTrue(rb.increase_partition_power())
self.assertFalse(rb.prepare_increase_partition_power())
self.assertEqual(rb.part_power, 9)
self.assertEqual(rb.next_part_power, 9)
self.assertTrue(rb.finish_increase_partition_power())
self.assertEqual(rb.part_power, 9)
self.assertIsNone(rb.next_part_power)
# Save .ring.gz, and load ring from it to ensure prev/next is set
rd = rb.get_ring()
rd.save(ring_file)
r = ring.Ring(ring_file)
expected_part_shift = 32 - 9
self.assertEqual(expected_part_shift, r._part_shift)
self.assertIsNone(r.next_part_power)
def test_prepare_increase_partition_power_failed(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
self.assertTrue(rb.prepare_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
# next_part_power is still set, do not increase again
self.assertFalse(rb.prepare_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
def test_increase_partition_power_failed(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
# next_part_power not set, can't increase the part power
self.assertFalse(rb.increase_partition_power())
self.assertEqual(rb.part_power, 8)
self.assertTrue(rb.prepare_increase_partition_power())
self.assertTrue(rb.increase_partition_power())
self.assertEqual(rb.part_power, 9)
# part_power already increased
self.assertFalse(rb.increase_partition_power())
self.assertEqual(rb.part_power, 9)
def test_cancel_increase_partition_power(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
old_version = rb.version
self.assertTrue(rb.prepare_increase_partition_power())
# sanity checks
self.assertEqual(8, rb.part_power)
self.assertEqual(9, rb.next_part_power)
self.assertEqual(rb.version, old_version + 1)
self.assertTrue(rb.cancel_increase_partition_power())
rb.validate()
self.assertEqual(8, rb.part_power)
self.assertEqual(8, rb.next_part_power)
self.assertEqual(rb.version, old_version + 2)
class TestGetRequiredOverload(unittest.TestCase):
maxDiff = None
def test_none_needed(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
# 4 equal-weight devs and 3 replicas: this can be balanced without
# resorting to overload at all
self.assertAlmostEqual(rb.get_required_overload(), 0)
expected = {
(0, 0, '127.0.0.1', 0): 0.75,
(0, 0, '127.0.0.1', 1): 0.75,
(0, 0, '127.0.0.1', 2): 0.75,
(0, 0, '127.0.0.1', 3): 0.75,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {
tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 4})
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 4})
# since no overload is needed, target_replicas is the same
rb.set_overload(0.10)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
# ... no matter how high you go!
rb.set_overload(100.0)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
# 3 equal-weight devs and 3 replicas: this can also be balanced
rb.remove_dev(3)
self.assertAlmostEqual(rb.get_required_overload(), 0)
expected = {
(0, 0, '127.0.0.1', 0): 1.0,
(0, 0, '127.0.0.1', 1): 1.0,
(0, 0, '127.0.0.1', 2): 1.0,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 4})
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 4})
# ... still no overload
rb.set_overload(100.0)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
def test_equal_replica_and_devices_count_ignore_weights(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 7.47,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 5.91,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 6.44,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
expected = {
0: 1.0,
1: 1.0,
2: 1.0,
}
# simplicity itself
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_weighted_replicas_by_tier().items()
if len(t) == 4})
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_wanted_replicas_by_tier().items()
if len(t) == 4})
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_target_replicas_by_tier().items()
if len(t) == 4})
# ... no overload required!
self.assertEqual(0, rb.get_required_overload())
rb.rebalance()
expected = {
0: 256,
1: 256,
2: 256,
}
self.assertEqual(expected, {d['id']: d['parts'] for d in
rb._iter_devs()})
def test_small_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
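        # total weight is 23, so zones 0 and 1 (weight 8) each want
        # 3 * 8 / 23 ~= 1.043 replicanths and zone 2 (weight 7) wants
        # 3 * 7 / 23 ~= 0.913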
expected = {
(0, 0): 1.0434782608695652,
(0, 1): 1.0434782608695652,
(0, 2): 0.9130434782608695,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 1.0,
(0, 1): 1.0,
(0, 2): 1.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
# the device tier is interesting because one of the devices in zone
# two has a different weight
expected = {
0: 0.5217391304347826,
1: 0.5217391304347826,
2: 0.5217391304347826,
3: 0.5217391304347826,
4: 0.5217391304347826,
5: 0.3913043478260869,
}
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 4})
        # ... but, each pair of devices still needs to hold a whole
        # replicanth, which we'll try to distribute fairly among the devices
        # in zone 2, so that they can share the burden and ultimately the
        # required overload will be as small as possible.
expected = {
0: 0.5,
1: 0.5,
2: 0.5,
3: 0.5,
4: 0.5714285714285715,
5: 0.42857142857142855,
}
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 4})
# full dispersion requires zone two's devices to eat more than
# they're weighted for
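        # the required overload is essentially the worst-case
        # (wanted - weighted) / weighted across devices; dev 4 wants
        # 4/7 ~= 0.571 but is weighted for only 12/23 ~= 0.522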
self.assertAlmostEqual(rb.get_required_overload(), 0.095238,
delta=1e-5)
        # so... if we give it enough overload we should get full dispersion
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
def test_multiple_small_zones(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 150,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 150,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 150,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
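        # zone weights are 2000, 450, 200 and 200 out of 2850 total, so by
        # weight alone z0 would hold 3 * 2000 / 2850 ~= 2.105 replicanths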
expected = {
(0, 0): 2.1052631578947367,
(0, 1): 0.47368421052631576,
(0, 2): 0.21052631578947367,
(0, 3): 0.21052631578947367,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
        # without any overload, target replicas simply follow the weights
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: r
for (tier, r) in target_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 1.0,
(0, 1): 1.0,
(0, 2): 0.49999999999999994,
(0, 3): 0.49999999999999994,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{t: r
for (t, r) in wanted_replicas.items()
if len(t) == 2})
self.assertEqual(1.3750000000000002, rb.get_required_overload())
# with enough overload we get the full dispersion
rb.set_overload(1.5)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: r
for (tier, r) in target_replicas.items()
if len(tier) == 2})
# with not enough overload, we get somewhere in the middle
rb.set_overload(1.0)
expected = {
(0, 0): 1.3014354066985647,
(0, 1): 0.8564593301435406,
(0, 2): 0.4210526315789473,
(0, 3): 0.4210526315789473,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: r
for (tier, r) in target_replicas.items()
if len(tier) == 2})
def test_big_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
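        # zone 0 has 200 of the 560 total weight, so by weight alone it
        # would hold 3 * 200 / 560 ~= 1.071 replicanths and each other zone
        # 3 * 120 / 560 ~= 0.643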
expected = {
(0, 0): 1.0714285714285714,
(0, 1): 0.6428571428571429,
(0, 2): 0.6428571428571429,
(0, 3): 0.6428571428571429,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 1.0,
(0, 1): 0.6666666666666667,
(0, 2): 0.6666666666666667,
(0, 3): 0.6666666666666667,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
        # when all the devices and servers in a zone are evenly weighted,
        # the zone tier accurately proxies their required overload; all the
        # zones besides 0 require the same overload
t = random.choice([t for t in weighted_replicas
if len(t) == 2
and t[1] != 0])
expected_overload = ((wanted_replicas[t] - weighted_replicas[t])
/ weighted_replicas[t])
self.assertAlmostEqual(rb.get_required_overload(),
expected_overload)
        # but if you only give it half of that
rb.set_overload(expected_overload / 2.0)
        # ... you can expect it's not going to fully disperse
expected = {
(0, 0): 1.0357142857142856,
(0, 1): 0.6547619047619049,
(0, 2): 0.6547619047619049,
(0, 3): 0.6547619047619049,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
def test_enormous_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
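        # zone 0 holds 2000 of the 2360 total weight, so by weight alone it
        # would take 3 * 2000 / 2360 ~= 2.54 replicanths, leaving only
        # ~0.15 for each of the other zones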
expected = {
(0, 0): 2.542372881355932,
(0, 1): 0.15254237288135591,
(0, 2): 0.15254237288135591,
(0, 3): 0.15254237288135591,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 1.0,
(0, 1): 0.6666666666666667,
(0, 2): 0.6666666666666667,
(0, 3): 0.6666666666666667,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
        # ouch, those "tiny" devices need to hold 3x more than they're
        # weighted for!
self.assertAlmostEqual(rb.get_required_overload(), 3.370370,
delta=1e-5)
# let's get a little crazy, and let devices eat up to 1x more than
# their capacity is weighted for - see how far that gets us...
rb.set_overload(1)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
(0, 0): 2.084745762711864,
(0, 1): 0.30508474576271183,
(0, 2): 0.30508474576271183,
(0, 3): 0.30508474576271183,
}
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
def test_two_big_two_small(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 45,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 45,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 35,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 35,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
expected = {
(0, 0): 1.0714285714285714,
(0, 1): 1.0714285714285714,
(0, 2): 0.48214285714285715,
(0, 3): 0.375,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 1.0,
(0, 1): 1.0,
(0, 2): 0.5625,
(0, 3): 0.43749999999999994,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
# I'm not sure it's significant or coincidental that the devices
# in zone 2 & 3 who end up splitting the 3rd replica turn out to
# need to eat ~1/6th extra replicanths
self.assertAlmostEqual(rb.get_required_overload(), 1.0 / 6.0)
# ... *so* 10% isn't *quite* enough
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
(0, 0): 1.0285714285714285,
(0, 1): 1.0285714285714285,
(0, 2): 0.5303571428571429,
(0, 3): 0.4125,
}
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
# ... but 20% will do the trick!
rb.set_overload(0.2)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
(0, 0): 1.0,
(0, 1): 1.0,
(0, 2): 0.5625,
(0, 3): 0.43749999999999994,
}
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
def test_multiple_replicas_each(self):
rb = ring.RingBuilder(8, 7, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
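        # zone weights are 400 and 280 of 680 total, so by weight z0 gets
        # 7 * 400 / 680 ~= 4.12 replicanths and z1 gets 7 * 280 / 680 ~= 2.88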
expected = {
(0, 0): 4.117647058823529,
(0, 1): 2.8823529411764706,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 4.0,
(0, 1): 3.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
# I guess 2.88 => 3.0 is about a 4% increase
self.assertAlmostEqual(rb.get_required_overload(),
0.040816326530612256)
# ... 10% is plenty enough here
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
def test_small_extra_server_in_zone_with_multiple_replicas(self):
rb = ring.RingBuilder(8, 5, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 1000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 1000})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdc', 'weight': 1000})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 1000})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 1000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdc', 'weight': 1000})
# z1 - extra small server
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 50})
expected = {
(0, 0): 2.479338842975207,
(0, 1): 2.5206611570247937,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t: r for (t, r) in
weighted_replicas.items()
if len(t) == 2})
# dispersion is fine with this at the zone tier
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {t: r for (t, r) in
wanted_replicas.items()
if len(t) == 2})
# ... but not ok with that tiny server
expected = {
'127.0.0.1': 2.479338842975207,
'127.0.0.2': 1.5206611570247937,
'127.0.0.3': 1.0,
}
self.assertEqual(expected, {t[-1]: r for (t, r) in
wanted_replicas.items()
if len(t) == 3})
self.assertAlmostEqual(23.2, rb.get_required_overload())
def test_multiple_replicas_in_zone_with_single_device(self):
rb = ring.RingBuilder(8, 5, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 100})
# z1
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sdb', 'weight': 100})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sdc', 'weight': 100})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sdd', 'weight': 100})
# first things first, make sure we do this right
rb.rebalance()
        # each device gets a single replica of every part
expected = {
0: 256,
1: 256,
2: 256,
3: 256,
4: 256,
}
self.assertEqual(expected, {d['id']: d['parts']
for d in rb._iter_devs()})
# but let's make sure we're thinking about it right too
expected = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
}
# by weight everyone is equal
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
        # wanted might have liked to have fewer replicas in z1, but the
        # single device in z0 limits us to one replica per device
with rb.debug():
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
wanted_replicas.items()
if len(t) == 4})
# even with some overload - still one replica per device
rb.set_overload(1.0)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
        # when overload cannot change the outcome, none is required
self.assertEqual(0.0, rb.get_required_overload())
# even though dispersion is terrible (in z1 particularly)
self.assertEqual(20.0, rb.dispersion)
def test_one_big_guy_does_not_spoil_his_buddy(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 100})
# z1
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 100})
# z2
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# obviously d5 gets one whole replica; the other two replicas
# are split evenly among the five other devices
# (i.e. ~0.4 replicanths for each 100 units of weight)
expected = {
0: 0.39999999999999997,
1: 0.39999999999999997,
2: 0.39999999999999997,
3: 0.39999999999999997,
4: 0.39999999999999997,
5: 1.0,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
# with no overload we get the "balanced" placement
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# but in reality, these devices having such disparate weights
# leads to a *terrible* balance even w/o overload!
rb.rebalance(seed=9)
self.assertEqual(rb.get_balance(), 1308.2031249999998)
# even though part assignment is pretty reasonable
expected = {
0: 103,
1: 102,
2: 103,
3: 102,
4: 102,
5: 256,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
        # so what's happening is the small devices are holding *way* more
        # *real* parts than their *relative* portion of the weight would
        # like them to!
expected = {
0: 1308.2031249999998,
1: 1294.5312499999998,
2: 1308.2031249999998,
3: 1294.5312499999998,
4: 1294.5312499999998,
5: -65.0,
}
self.assertEqual(expected, rb._build_balance_per_dev())
# increasing overload moves towards one replica in each tier
rb.set_overload(0.20)
expected = {
0: 0.48,
1: 0.48,
2: 0.48,
3: 0.48,
4: 0.30857142857142855,
5: 0.7714285714285714,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# ... and as always increasing overload makes balance *worse*
rb.rebalance(seed=17)
self.assertEqual(rb.get_balance(), 1581.6406249999998)
        # but despite the overall trend toward imbalance, in the tier with the
        # huge device we want to see the small device (d4) shed parts as
        # effectively as it can to the huge device in the same tier (d5).
        # this is useful behavior any time a device within a tier wants parts
        # from another device already in the same tier, for whatever reason;
        # another example is `test_one_small_guy_does_not_spoil_his_buddy`
expected = {
0: 123,
1: 123,
2: 123,
3: 123,
4: 79,
5: 197,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
        # *see*, at least *someone's* balance is getting better!
expected = {
0: 1581.6406249999998,
1: 1581.6406249999998,
2: 1581.6406249999998,
3: 1581.6406249999998,
4: 980.078125,
5: -73.06640625,
}
self.assertEqual(expected, rb._build_balance_per_dev())
def test_one_small_guy_does_not_spoil_his_buddy(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# z1
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 10000})
# z2
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 10000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 100})
        # it's almost like 3.0 / 5 ~= 0.6, but that one little guy gets
        # his fair share
expected = {
0: 0.5988023952095808,
1: 0.5988023952095808,
2: 0.5988023952095808,
3: 0.5988023952095808,
4: 0.5988023952095808,
5: 0.005988023952095809,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
weighted_replicas.items()
if len(t) == 4})
# with no overload we get a nice balanced placement
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
rb.rebalance(seed=9)
        # part placement looks good
expected = {
0: 154,
1: 153,
2: 153,
3: 153,
4: 153,
5: 2,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
# ... balance is a little lumpy on the small guy since he wants
# one and a half parts :\
expected = {
0: 0.4609375000000142,
1: -0.1914062499999858,
2: -0.1914062499999858,
3: -0.1914062499999858,
4: -0.1914062499999858,
5: 30.46875,
}
self.assertEqual(expected, rb._build_balance_per_dev())
self.assertEqual(rb.get_balance(), 30.46875)
# increasing overload moves towards one replica in each tier
rb.set_overload(0.3)
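        # (rough arithmetic behind the expected values: the z2 devices may
        # take up to 1.3x their weighted shares, i.e. 0.5988 * 1.3 ~= 0.778
        # and 0.006 * 1.3 ~= 0.0078; the rest is spread evenly across the
        # four z0/z1 devices)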
expected = {
0: 0.553443113772455,
1: 0.553443113772455,
2: 0.553443113772455,
3: 0.553443113772455,
4: 0.778443113772455,
5: 0.007784431137724551,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
# ... and as always increasing overload makes balance *worse*
rb.rebalance(seed=12)
self.assertEqual(rb.get_balance(), 30.46875)
        # the little guy is really struggling to take his share, though
expected = {
0: 142,
1: 141,
2: 142,
3: 141,
4: 200,
5: 2,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
# ... and you can see it in the balance!
expected = {
0: -7.367187499999986,
1: -8.019531249999986,
2: -7.367187499999986,
3: -8.019531249999986,
4: 30.46875,
5: 30.46875,
}
self.assertEqual(expected, rb._build_balance_per_dev())
rb.set_overload(0.5)
expected = {
0: 0.5232035928143712,
1: 0.5232035928143712,
2: 0.5232035928143712,
3: 0.5232035928143712,
4: 0.8982035928143712,
5: 0.008982035928143714,
}
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected, {t[-1]: r for (t, r) in
target_replicas.items()
if len(t) == 4})
        # because the device is so small, balance gets bad quickly
rb.rebalance(seed=17)
self.assertEqual(rb.get_balance(), 95.703125)
# but despite the overall trend toward imbalance, the little guy
# isn't really taking on many new parts!
expected = {
0: 134,
1: 134,
2: 134,
3: 133,
4: 230,
5: 3,
}
self.assertEqual(expected, {
d['id']: d['parts'] for d in rb._iter_devs()})
        # *see*, at least everyone's balance is getting worse *together*!
expected = {
0: -12.585937499999986,
1: -12.585937499999986,
2: -12.585937499999986,
3: -13.238281249999986,
4: 50.0390625,
5: 95.703125,
}
self.assertEqual(expected, rb._build_balance_per_dev())
def test_two_servers_with_more_than_one_replica(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 60})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 60})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 60})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 80})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 128})
# z2
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 80})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 240})
rb.set_overload(0.1)
rb.rebalance()
self.assertEqual(12.161458333333343, rb.get_balance())
replica_plan = rb._build_target_replicas_by_tier()
for dev in rb._iter_devs():
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
expected_parts = replica_plan[tier] * rb.parts
self.assertAlmostEqual(dev['parts'], expected_parts,
delta=1)
def test_multi_zone_with_failed_device(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 2000})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 2000})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sdb', 'weight': 2000})
# sanity, balanced and dispersed
expected = {
(0, 0): 1.0,
(0, 1): 1.0,
(0, 2): 1.0,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
self.assertEqual(rb.get_required_overload(), 0.0)
# fail a device in zone 2
rb.remove_dev(4)
expected = {
0: 0.6,
1: 0.6,
2: 0.6,
3: 0.6,
5: 0.6,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 4})
expected = {
0: 0.5,
1: 0.5,
2: 0.5,
3: 0.5,
5: 1.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 4})
        # does this make sense? every zone was holding 1/3rd of the
        # replicas, so each device was 1/6th, remove a device and
        # suddenly it's holding *both* sixths - 2/3rds more than its
        # weight says it should?
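        # (the arithmetic behind the assertion below: the surviving z2 device
        # is weighted for 0.6 replicanths but needs to hold 1.0, and
        # (1.0 - 0.6) / 0.6 = 2/3)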
self.assertAlmostEqual(rb.get_required_overload(), 2.0 / 3.0)
# 10% isn't nearly enough
rb.set_overload(0.1)
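        # (with 10% overload d5 may take up to 0.6 * 1.1 = 0.66 replicanths;
        # the other four devices split the remaining 2.34 evenly, 0.585 each)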
target_replicas = rb._build_target_replicas_by_tier()
expected = {
0: 0.585,
1: 0.585,
2: 0.585,
3: 0.585,
5: 0.6599999999999999,
}
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
# 50% isn't even enough
rb.set_overload(0.5)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
0: 0.525,
1: 0.525,
2: 0.525,
3: 0.525,
5: 0.8999999999999999,
}
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
# even 65% isn't enough (but it's getting closer)
rb.set_overload(0.65)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
0: 0.5025000000000001,
1: 0.5025000000000001,
2: 0.5025000000000001,
3: 0.5025000000000001,
5: 0.99,
}
self.assertEqual(expected,
{tier[3]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 4})
def test_balanced_zones_unbalanced_servers(self):
rb = ring.RingBuilder(8, 3, 1)
# zone 0 server 127.0.0.1
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 3000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 3000})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 3000})
# zone 1 server 127.0.0.2
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 4000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 4000})
# zone 1 (again) server 127.0.0.3
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 1000})
weighted_replicas = rb._build_weighted_replicas_by_tier()
# zones are evenly weighted
expected = {
(0, 0): 1.5,
(0, 1): 1.5,
}
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
# ... but servers are not
expected = {
'127.0.0.1': 1.5,
'127.0.0.2': 1.3333333333333333,
'127.0.0.3': 0.16666666666666666,
}
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 3})
# make sure wanted will even it out
expected = {
'127.0.0.1': 1.5,
'127.0.0.2': 1.0,
'127.0.0.3': 0.4999999999999999,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 3})
        # so it wants 1/6th and eats 1/2 - that's 2/6ths more than it
        # wants, which is a 200% increase
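        # (i.e. (0.5 - 1/6) / (1/6) = 2.0, which is what
        # get_required_overload() reports below)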
self.assertAlmostEqual(rb.get_required_overload(), 2.0)
        # the overload doesn't affect the tiers that are already dispersed
rb.set_overload(1)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
'127.0.0.1': 1.5,
# notice with half the overload 1/6th replicanth swapped servers
'127.0.0.2': 1.1666666666666665,
'127.0.0.3': 0.3333333333333333,
}
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 3})
def test_adding_second_zone(self):
rb = ring.RingBuilder(3, 3, 1)
# zone 0 server 127.0.0.1
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 2000})
# zone 0 server 127.0.0.2
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 2000})
# zone 0 server 127.0.0.3
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sdb', 'weight': 2000})
# sanity, balanced and dispersed
expected = {
'127.0.0.1': 1.0,
'127.0.0.2': 1.0,
'127.0.0.3': 1.0,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 3})
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 3})
self.assertEqual(rb.get_required_overload(), 0)
# start adding a second zone
# zone 1 server 127.0.1.1
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sdb', 'weight': 100})
# zone 1 server 127.0.1.2
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sdb', 'weight': 100})
# zone 1 server 127.0.1.3
rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'ip': '127.0.1.3',
'port': 6200, 'device': 'sda', 'weight': 100})
rb.add_dev({'id': 11, 'region': 0, 'zone': 1, 'ip': '127.0.1.3',
'port': 6200, 'device': 'sdb', 'weight': 100})
# this messes things up pretty royally
expected = {
'127.0.0.1': 0.9523809523809523,
'127.0.0.2': 0.9523809523809523,
'127.0.0.3': 0.9523809523809523,
'127.0.1.1': 0.047619047619047616,
'127.0.1.2': 0.047619047619047616,
'127.0.1.3': 0.047619047619047616,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 3})
expected = {
'127.0.0.1': 0.6666666666666667,
'127.0.0.2': 0.6666666666666667,
'127.0.0.3': 0.6666666666666667,
'127.0.1.1': 0.3333333333333333,
'127.0.1.2': 0.3333333333333333,
'127.0.1.3': 0.3333333333333333,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 3})
        # so dispersion would require these devices hold 6x more than
        # prescribed by weight, defeating any attempt at doing anything
        # gradually
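        # (each zone 1 server is weighted for ~0.048 replicanths but
        # dispersion wants it to hold ~0.333 - roughly a 7x jump, i.e. a
        # required overload of 6.0)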
self.assertAlmostEqual(rb.get_required_overload(), 6.0)
# so let's suppose we only allow for 10% overload
rb.set_overload(0.10)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
# we expect servers in zone 0 to be between 0.952 and 0.666
'127.0.0.1': 0.9476190476190476,
'127.0.0.2': 0.9476190476190476,
'127.0.0.3': 0.9476190476190476,
# we expect servers in zone 1 to be between 0.0476 and 0.333
            # and in fact it's a ~10% increase (very little compared to 6x!)
'127.0.1.1': 0.052380952380952375,
'127.0.1.2': 0.052380952380952375,
'127.0.1.3': 0.052380952380952375,
}
self.assertEqual(expected,
{tier[2]: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 3})
def test_gradual_replica_count(self):
rb = ring.RingBuilder(3, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sdb', 'weight': 2000})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 2000})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sdb', 'weight': 2000})
expected = {
0: 0.625,
1: 0.625,
2: 0.625,
3: 0.625,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {
tier[3]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 4})
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {
tier[3]: wanted
for (tier, wanted) in wanted_replicas.items()
if len(tier) == 4})
self.assertEqual(rb.get_required_overload(), 0)
# server 127.0.0.2 will have only one device
rb.remove_dev(2)
# server 127.0.0.1 has twice the capacity of 127.0.0.2
expected = {
'127.0.0.1': 1.6666666666666667,
'127.0.0.2': 0.8333333333333334,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected, {
tier[2]: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 3})
# dispersion requirements extend only to whole replicas
expected = {
'127.0.0.1': 1.4999999999999998,
'127.0.0.2': 1.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected, {
tier[2]: wanted
for (tier, wanted) in wanted_replicas.items()
if len(tier) == 3})
# 5/6ths to a whole replicanth is a 20% increase
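        # (i.e. (1.0 - 0.8333) / 0.8333 = 0.2 for server 127.0.0.2)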
self.assertAlmostEqual(rb.get_required_overload(), 0.2)
# so let's suppose we only allow for 10% overload
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
expected = {
'127.0.0.1': 1.5833333333333333,
'127.0.0.2': 0.9166666666666667,
}
self.assertEqual(expected, {
tier[2]: wanted
for (tier, wanted) in target_replicas.items()
if len(tier) == 3})
def test_perfect_four_zone_four_replica_bad_placement(self):
rb = ring.RingBuilder(4, 4, 1)
        # this weight is sorta nuts, but it's really just to help the
        # weight_of_one_part hit a magic number where floats mess up
        # like they would on a ring with a part power of 19 and hundreds
        # of thousands of units of weight.
weight = 21739130434795e-11
# r0z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': weight,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': weight,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
# r0z1
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': weight,
'ip': '127.0.1.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': weight,
'ip': '127.0.1.2', 'port': 10000, 'device': 'sdb'})
# r1z0
rb.add_dev({'id': 4, 'region': 1, 'zone': 0, 'weight': weight,
'ip': '127.1.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': weight,
'ip': '127.1.0.2', 'port': 10000, 'device': 'sdb'})
# r1z1
rb.add_dev({'id': 6, 'region': 1, 'zone': 1, 'weight': weight,
'ip': '127.1.1.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 1, 'zone': 1, 'weight': weight,
'ip': '127.1.1.2', 'port': 10000, 'device': 'sdb'})
# the replica plan is sound
expectations = {
# tier_len => expected replicas
1: {
(0,): 2.0,
(1,): 2.0,
},
2: {
(0, 0): 1.0,
(0, 1): 1.0,
(1, 0): 1.0,
(1, 1): 1.0,
}
}
wr = rb._build_replica_plan()
for tier_len, expected in expectations.items():
self.assertEqual(expected, {t: r['max'] for (t, r) in
wr.items() if len(t) == tier_len})
        # even though a naive ceil of weights is surprisingly wrong
expectations = {
# tier_len => expected replicas
1: {
(0,): 3.0,
(1,): 3.0,
},
2: {
(0, 0): 2.0,
(0, 1): 2.0,
(1, 0): 2.0,
(1, 1): 2.0,
}
}
wr = rb._build_weighted_replicas_by_tier()
for tier_len, expected in expectations.items():
self.assertEqual(expected, {t: ceil(r) for (t, r) in
wr.items() if len(t) == tier_len})
class TestRingBuilderDispersion(unittest.TestCase):
def setUp(self):
self.devs = ('d%s' % i for i in itertools.count())
def assertAlmostPartCount(self, counts, expected, delta=3):
msgs = []
failed = False
for k, p in sorted(expected.items()):
try:
self.assertAlmostEqual(counts[k], p, delta=delta)
except KeyError:
self.fail('%r is missing the key %r' % (counts, k))
except AssertionError:
failed = True
state = '!='
else:
state = 'ok'
msgs.append('parts in %s was %s expected %s (%s)' % (
k, counts[k], p, state))
if failed:
self.fail('some part counts not close enough '
'to expected:\n' + '\n'.join(msgs))
def test_rebalance_dispersion(self):
rb = ring.RingBuilder(8, 6, 0)
for i in range(6):
rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0,
'device': next(self.devs)})
rb.rebalance()
self.assertEqual(0, rb.dispersion)
for z in range(2):
for i in range(6):
rb.add_dev({'region': 0, 'zone': z + 1, 'ip': '127.0.1.1',
'port': 6000, 'weight': 1.0,
'device': next(self.devs)})
self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
{0: 1536, 1: 0, 2: 0})
rb.rebalance()
self.assertEqual(rb.dispersion, 50.0)
expected = {0: 1280, 1: 128, 2: 128}
self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
expected)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+$', verbose=True)['graph'])
counts = {int(k.split('z')[1]): d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected)
rb.rebalance()
self.assertEqual(rb.dispersion, 33.333333333333336)
expected = {0: 1024, 1: 256, 2: 256}
self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
expected)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+$', verbose=True)['graph'])
counts = {int(k.split('z')[1]): d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected)
rb.rebalance()
self.assertEqual(rb.dispersion, 16.666666666666668)
expected = {0: 768, 1: 384, 2: 384}
self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
expected)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+$', verbose=True)['graph'])
counts = {int(k.split('z')[1]): d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected)
rb.rebalance()
self.assertEqual(0, rb.dispersion)
expected = {0: 512, 1: 512, 2: 512}
self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+$', verbose=True)['graph'])
counts = {int(k.split('z')[1]): d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected)
def test_weight_dispersion(self):
rb = ring.RingBuilder(8, 3, 0)
for i in range(2):
for d in range(3):
rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.%s.1' % i,
'port': 6000, 'weight': 1.0,
'device': next(self.devs)})
for d in range(3):
rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.2.1',
'port': 6000, 'weight': 10.0,
'device': next(self.devs)})
rb.rebalance()
        # each tier should only have 1 replicanth, but the big server has 2
        # replicas of every part and 3 replicas of another 1/2 - so our total
        # dispersion is greater than one replicanth, it's 1.5
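        # (i.e. 1.5 misplaced replicanths out of 3 replicas ~= 50%)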
self.assertEqual(50.0, rb.dispersion)
expected = {
'127.0.0.1': 64,
'127.0.1.1': 64,
'127.0.2.1': 640,
}
self.assertAlmostPartCount(_partition_counts(rb, 'ip'),
expected)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph'])
counts = {k.split('-')[1]: d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected)
def test_multiple_tier_dispersion(self):
rb = ring.RingBuilder(10, 8, 0)
r_z_to_ip_count = {
(0, 0): 2,
(1, 1): 1,
(1, 2): 2,
}
ip_index = 0
for (r, z), ip_count in sorted(r_z_to_ip_count.items()):
for i in range(ip_count):
ip_index += 1
for d in range(3):
rb.add_dev({'region': r, 'zone': z,
'ip': '127.%s.%s.%s' % (r, z, ip_index),
'port': 6000, 'weight': 1.0,
'device': next(self.devs)})
for i in range(3):
# it might take a few rebalances for all the right part replicas to
# balance from r1z2 into r1z1
rb.rebalance()
self.assertAlmostEqual(15.52734375, rb.dispersion, delta=5.0)
self.assertAlmostEqual(0.0, rb.get_balance(), delta=0.5)
expected = {
'127.0.0.1': 1638,
'127.0.0.2': 1638,
'127.1.1.3': 1638,
'127.1.2.4': 1638,
'127.1.2.5': 1638,
}
delta = 10
self.assertAlmostPartCount(_partition_counts(rb, 'ip'), expected,
delta=delta)
report = dict(utils.dispersion_report(
rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph'])
counts = {k.split('-')[1]: d['placed_parts']
for k, d in report.items()}
self.assertAlmostPartCount(counts, expected, delta=delta)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,144,215,153,188,027,000 | 42.513233 | 79 | 0.482992 | false |
komsas/OpenUpgrade | addons/sale_crm/__init__.py | 353 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import sale_crm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -953,248,329,403,408,800 | 42.04 | 78 | 0.6171 | false |
mattvick/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py | 124 | 2557 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import unittest2 as unittest
from webkitpy.common.watchlist.filenamepattern import FilenamePattern
class FileNamePatternTest(unittest.TestCase):
def test_filename_pattern_literal(self):
filename_pattern = FilenamePattern(re.compile(r'MyFileName\.cpp'))
        # Note the following filenames are not regexes.
self.assertTrue(filename_pattern.match('MyFileName.cpp', None))
self.assertTrue(filename_pattern.match('MyFileName.cppa', None))
self.assertFalse(filename_pattern.match('aMyFileName.cpp', None))
self.assertFalse(filename_pattern.match('MyFileNamebcpp', None))
def test_filename_pattern_substring(self):
filename_pattern = FilenamePattern(re.compile(r'.*\\MyFileName\..*'))
        # Note the following filenames are not regexes.
self.assertTrue(filename_pattern.match(r'\\MyFileName.cpp', None))
self.assertTrue(filename_pattern.match(r'a\\MyFileName.h', None))
self.assertFalse(filename_pattern.match(r'\\aMyFileName.cpp', None))
| bsd-3-clause | -7,268,910,329,784,358,000 | 48.173077 | 77 | 0.757137 | false |
tudorvio/nova | nova/api/openstack/compute/plugins/v3/deferred_delete.py | 36 | 2841 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The deferred instance delete extension."""
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = 'os-deferred-delete'
authorize = extensions.os_compute_authorizer(ALIAS)
class DeferredDeleteController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(DeferredDeleteController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
@wsgi.response(202)
@extensions.expected_errors((404, 409, 403))
@wsgi.action('restore')
def _restore(self, req, id, body):
"""Restore a previously deleted instance."""
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.restore(context, instance)
except exception.QuotaError as error:
raise webob.exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'restore', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('forceDelete')
def _force_delete(self, req, id, body):
"""Force delete of instance before deferred cleanup."""
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.force_delete(context, instance)
except exception.InstanceIsLocked as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
class DeferredDelete(extensions.V3APIExtensionBase):
"""Instance deferred delete."""
name = "DeferredDelete"
alias = "os-deferred-delete"
version = 1
def get_controller_extensions(self):
controller = DeferredDeleteController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 | -7,896,704,534,790,792,000 | 35.423077 | 79 | 0.687786 | false |
JASON0916/testrail-library | testrail_client/api/plan.py | 1 | 6164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import TestRailAPIBase
class Plan(TestRailAPIBase):
"""
Use the following API methods to request details
about test plans and to create or modify test plans.
"""
def __repr__(self):
return '<TestRailAPI plan>'
def get(self, plan_id):
"""
Returns an existing test plan.
:param plan_id:The ID of the test plan
"""
return self._get('get_plan/{}'.format(plan_id))
def for_project(self, project_id, **filters):
"""
Returns a list of test plans for a project.
:param project_id:The ID of the project
:param filters: dict, request filters
"""
return self._get('get_plans/{}'.format(project_id),
params=filters)
def add(self, project_id, name='', description='',
milestone_id=None, entries=list()):
"""
Creates a new test plan.
:param project_id:The ID of the project the test plan should be added to
:param name:The name of the test plan (required)
:param description:The description of the test plan
:param milestone_id:The ID of the milestone to link to the test plan
:param entries:An array of objects describing the test runs of the plan
"""
param = dict(name=name, description=description,
milestone_id=milestone_id, entries=entries)
return self._post('add_plan/{}'.format(project_id),
json=param)
def add_entry(self, plan_id, suite_id, name='', description='',
assignedto_id=None, include_all=True, case_ids=list(),
config_ids=list(), runs=list()):
"""
Adds one or more new test runs to a test plan.
:param plan_id:The ID of the plan the test runs should be added to
:param suite_id:The ID of the test suite for the test run(s) (required)
:param name:The name of the test run(s)
:param description:The description of the test run(s) (requires TestRail 5.2 or later)
:param assignedto_id:The ID of the user the test run(s) should be assigned to
:param include_all:True for including all test cases of the test suite
and false for a custom case selection
:param case_ids:An array of case IDs for the custom case selection
:param config_ids:An array of configuration IDs used for the test runs
of the test plan entry (requires TestRail 3.1 or later)
:param runs:An array of test runs with configurations,
please see the example below for details (requires TestRail 3.1 or later)
"""
param = dict(suite_id=suite_id, name=name, description=description,
assignedto_id=assignedto_id, include_all=include_all, case_ids=case_ids,
config_ids=config_ids, runs=runs)
return self._post('add_plan_entry/{}'.format(plan_id),
json=param)
def update(self, plan_id, name='', description='',
milestone_id=None, entries=list()):
"""
Updates an existing test plan (partial updates are supported,
i.e. you can submit and update specific fields only).
:param plan_id:The ID of the test plan
:param name:The name of the test plan (required)
:param description:The description of the test plan
:param milestone_id:The ID of the milestone to link to the test plan
:param entries:An array of objects describing the test runs of the plan
"""
param = dict(name=name, description=description,
milestone_id=milestone_id, entries=entries)
        return self._post('update_plan/{}'.format(plan_id),
                          json=param)
def update_entry(self, plan_id, entry_id, name='',
description=None, assignedto_id=None,
include_all=True, case_ids=list()):
"""
Updates one or more existing test runs in a plan
(partial updates are supported, i.e. you can submit and update specific fields only).
:param plan_id: The ID of the test plan
:param entry_id:The ID of the test plan entry (note: not the test run ID)
:param name:The name of the test run(s)
:param description:The description of the test run(s)
(requires TestRail 5.2 or later)
:param assignedto_id:The ID of the user the test run(s) should be assigned to
:param include_all:True for including all test cases of the test suite
and false for a custom case selection
:param case_ids:An array of case IDs for the custom case selection
"""
return self._post('update_plan_entry/{plan_id}/{entry_id}'
.format(plan_id=plan_id, entry_id=entry_id),
json=dict(name=name, description=description,
assignedto_id=assignedto_id,
include_all=include_all, case_ids=case_ids)
)
def close(self, plan_id):
"""
Closes an existing test plan and archives its test runs & results.
:param plan_id:The ID of the test plan
"""
return self._post('close_plan/{}'.format(plan_id))
def delete(self, plan_id):
"""
Deletes an existing test plan.
:param plan_id:The ID of the test plan
"""
return self._post('delete_plan/{}'.format(plan_id))
def delete_entry(self, plan_id, entry_id):
"""
Deletes one or more existing test runs from a plan.
:param plan_id:The ID of the test plan
:param entry_id:The ID of the test plan entry (note: not the test run ID)
"""
return self._post('delete_plan_entry/{plan_id}/{entry_id}'
.format(plan_id=plan_id,
entry_id=entry_id)
)
| mit | -6,673,403,322,345,974,000 | 45.69697 | 165 | 0.578034 | false |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim/configs/dplcm_zone_estimation_config.py | 2 | 1198 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from dplcm_estimation_config import dplcm_configuration as dplcm_gridcell_config
from estimation_zone_config import run_configuration as config
class dplcm_configuration(dplcm_gridcell_config):
def get_configuration(self):
run_configuration = config.copy()
dplcm_local_configuration = self.get_dplcm_configuration()
run_configuration.merge(dplcm_local_configuration)
return run_configuration
def get_dplcm_configuration(self):
run_configuration = dplcm_gridcell_config.get_dplcm_configuration(self)
run_configuration.merge(self.get_local_dplcm_configuration())
return run_configuration
def get_local_dplcm_configuration(self):
run_configuration = {}
run_configuration["models"] = [
{"%s_development_project_location_choice_model" % self.type: ["estimate"]}
]
run_configuration["datasets_to_preload"] = {
'zone':{},
'job':{},
'gridcell': {}
}
return run_configuration | gpl-2.0 | -462,067,657,238,776,450 | 38 | 86 | 0.649416 | false |
transt/cloud-init-0.7.5 | cloudinit/distros/parsers/hosts.py | 8 | 3241 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
from cloudinit.distros.parsers import chop_comment
# See: man hosts
# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
# or http://tinyurl.com/6lmox3
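# Each non-comment line is roughly: <ip> <canonical_hostname> [alias ...]
# e.g. "127.0.0.1 localhost localhost.localdomain"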
class HostsConf(object):
def __init__(self, text):
self._text = text
self._contents = None
def parse(self):
if self._contents is None:
self._contents = self._parse(self._text)
def get_entry(self, ip):
self.parse()
options = []
for (line_type, components) in self._contents:
if line_type == 'option':
(pieces, _tail) = components
if len(pieces) and pieces[0] == ip:
options.append(pieces[1:])
return options
def del_entries(self, ip):
self.parse()
n_entries = []
for (line_type, components) in self._contents:
if line_type != 'option':
n_entries.append((line_type, components))
continue
else:
(pieces, _tail) = components
if len(pieces) and pieces[0] == ip:
pass
elif len(pieces):
n_entries.append((line_type, list(components)))
self._contents = n_entries
def add_entry(self, ip, canonical_hostname, *aliases):
self.parse()
self._contents.append(('option',
([ip, canonical_hostname] + list(aliases), '')))
def _parse(self, contents):
entries = []
for line in contents.splitlines():
if not len(line.strip()):
entries.append(('blank', [line]))
continue
(head, tail) = chop_comment(line.strip(), '#')
if not len(head):
entries.append(('all_comment', [line]))
continue
entries.append(('option', [head.split(None), tail]))
return entries
def __str__(self):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
if line_type == 'blank':
contents.write("%s\n" % (components[0]))
elif line_type == 'all_comment':
contents.write("%s\n" % (components[0]))
elif line_type == 'option':
(pieces, tail) = components
pieces = [str(p) for p in pieces]
pieces = "\t".join(pieces)
contents.write("%s%s\n" % (pieces, tail))
return contents.getvalue()
| gpl-3.0 | -6,904,120,220,140,626,000 | 34.228261 | 78 | 0.553224 | false |
mrunge/horizon | openstack_dashboard/dashboards/project/data_processing/jobs/workflows/launch.py | 14 | 16820 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as t_flows
import openstack_dashboard.dashboards.project.data_processing. \
clusters.workflows.create as c_flow
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
DATA_SOURCE_CREATE_URL = ("horizon:project:data_processing.data_sources"
":create-data-source")
class JobExecutionGeneralConfigAction(workflows.Action):
job_input = forms.DynamicChoiceField(
label=_("Input"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL)
job_output = forms.DynamicChoiceField(
label=_("Output"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL)
def __init__(self, request, *args, **kwargs):
super(JobExecutionGeneralConfigAction, self).__init__(request,
*args,
**kwargs)
if request.REQUEST.get("job_id", None) is None:
self.fields["job"] = forms.ChoiceField(
label=_("Job"))
self.fields["job"].choices = self.populate_job_choices(request)
else:
self.fields["job"] = forms.CharField(
widget=forms.HiddenInput(),
initial=request.REQUEST.get("job_id", None))
def populate_job_input_choices(self, request, context):
return self.get_data_source_choices(request, context)
def populate_job_output_choices(self, request, context):
return self.get_data_source_choices(request, context)
def get_data_source_choices(self, request, context):
try:
data_sources = saharaclient.data_source_list(request)
except Exception:
data_sources = []
exceptions.handle(request,
_("Unable to fetch data sources."))
choices = [(data_source.id, data_source.name)
for data_source in data_sources]
choices.insert(0, (None, 'None'))
return choices
def populate_job_choices(self, request):
try:
jobs = saharaclient.job_list(request)
except Exception:
jobs = []
exceptions.handle(request,
_("Unable to fetch jobs."))
choices = [(job.id, job.name)
for job in jobs]
return choices
class Meta:
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobExecutionExistingGeneralConfigAction(JobExecutionGeneralConfigAction):
cluster = forms.ChoiceField(
label=_("Cluster"),
initial=(None, "None"),
widget=forms.Select(attrs={"class": "cluster_choice"}))
def populate_cluster_choices(self, request, context):
try:
clusters = saharaclient.cluster_list(request)
except Exception:
clusters = []
exceptions.handle(request,
_("Unable to fetch clusters."))
choices = [(cluster.id, cluster.name)
for cluster in clusters]
return choices
class Meta:
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobConfigAction(workflows.Action):
MAIN_CLASS = "edp.java.main_class"
JAVA_OPTS = "edp.java.java_opts"
EDP_MAPPER = "edp.streaming.mapper"
EDP_REDUCER = "edp.streaming.reducer"
EDP_PREFIX = "edp."
property_name = forms.ChoiceField(
required=False,
)
job_configs = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_params = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_args_array = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_type = forms.CharField(
required=False,
widget=forms.HiddenInput())
main_class = forms.CharField(label=_("Main Class"),
required=False)
java_opts = forms.CharField(label=_("Java Opts"),
required=False)
streaming_mapper = forms.CharField(label=_("Mapper"))
streaming_reducer = forms.CharField(label=_("Reducer"))
def __init__(self, request, *args, **kwargs):
super(JobConfigAction, self).__init__(request, *args, **kwargs)
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
job_ex_id = request.REQUEST.get("job_execution_id")
job_ex = saharaclient.job_execution_get(request, job_ex_id)
job_configs = job_ex.job_configs
edp_configs = {}
if 'configs' in job_configs:
configs, edp_configs = (
self.clean_edp_configs(job_configs['configs']))
self.fields['job_configs'].initial = (
json.dumps(configs))
if 'params' in job_configs:
self.fields['job_params'].initial = (
json.dumps(job_configs['params']))
job_args = json.dumps(job_configs['args'])
self.fields['job_args_array'].initial = job_args
if self.MAIN_CLASS in edp_configs:
self.fields['main_class'].initial = (
edp_configs[self.MAIN_CLASS])
if self.JAVA_OPTS in edp_configs:
self.fields['java_opts'].initial = (
edp_configs[self.JAVA_OPTS])
if self.EDP_MAPPER in edp_configs:
self.fields['streaming_mapper'].initial = (
edp_configs[self.EDP_MAPPER])
if self.EDP_REDUCER in edp_configs:
self.fields['streaming_reducer'].initial = (
edp_configs[self.EDP_REDUCER])
def clean(self):
cleaned_data = super(workflows.Action, self).clean()
job_type = cleaned_data.get("job_type", None)
if job_type != "MapReduce.Streaming":
if "streaming_mapper" in self._errors:
del self._errors["streaming_mapper"]
if "streaming_reducer" in self._errors:
del self._errors["streaming_reducer"]
return cleaned_data
def populate_property_name_choices(self, request, context):
job_id = request.REQUEST.get("job_id") or request.REQUEST.get("job")
job_type = saharaclient.job_get(request, job_id).type
job_configs = (
saharaclient.job_get_configs(request, job_type).job_config)
choices = [(param['value'], param['name'])
for param in job_configs['configs']]
return choices
def clean_edp_configs(self, configs):
edp_configs = {}
for key, value in configs.iteritems():
if key.startswith(self.EDP_PREFIX):
edp_configs[key] = value
for rmkey in edp_configs.keys():
del configs[rmkey]
return (configs, edp_configs)
class Meta:
name = _("Configure")
help_text_template = (
"project/data_processing.jobs/_launch_job_configure_help.html")
class JobExecutionGeneralConfig(workflows.Step):
action_class = JobExecutionGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if v == "None" else v
else:
context["job_general_" + k] = v
return context
class JobExecutionExistingGeneralConfig(workflows.Step):
action_class = JobExecutionExistingGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if v == "None" else v
else:
context["job_general_" + k] = v
return context
class JobConfig(workflows.Step):
action_class = JobConfigAction
template_name = 'project/data_processing.jobs/config_template.html'
def contribute(self, data, context):
job_config = self.clean_configs(
json.loads(data.get("job_configs", '{}')))
job_params = self.clean_configs(
json.loads(data.get("job_params", '{}')))
job_args_array = self.clean_configs(
json.loads(data.get("job_args_array", '[]')))
job_type = data.get("job_type", '')
context["job_type"] = job_type
context["job_config"] = {"configs": job_config}
context["job_config"]["args"] = job_args_array
if job_type in ["Java", "Spark"]:
context["job_config"]["configs"][JobConfigAction.MAIN_CLASS] = (
data.get("main_class", ""))
context["job_config"]["configs"][JobConfigAction.JAVA_OPTS] = (
data.get("java_opts", ""))
elif job_type == "MapReduce.Streaming":
context["job_config"]["configs"][JobConfigAction.EDP_MAPPER] = (
data.get("streaming_mapper", ""))
context["job_config"]["configs"][JobConfigAction.EDP_REDUCER] = (
data.get("streaming_reducer", ""))
else:
context["job_config"]["params"] = job_params
return context
@staticmethod
def clean_configs(configs):
cleaned_conf = None
if isinstance(configs, dict):
cleaned_conf = dict([(k.strip(), v.strip())
for k, v in configs.items()
if len(v.strip()) > 0 and len(k.strip()) > 0])
elif isinstance(configs, list):
cleaned_conf = list([v.strip() for v in configs
if len(v.strip()) > 0])
return cleaned_conf
class NewClusterConfigAction(c_flow.GeneralConfigAction):
persist_cluster = forms.BooleanField(
label=_("Persist cluster after job exit"),
required=False)
class Meta:
name = _("Configure Cluster")
help_text_template = (
"project/data_processing.clusters/_configure_general_help.html")
class ClusterGeneralConfig(workflows.Step):
action_class = NewClusterConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["cluster_general_" + k] = v
return context
class LaunchJob(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.job_executions:index"
default_steps = (JobExecutionExistingGeneralConfig, JobConfig)
def handle(self, request, context):
saharaclient.job_execution_create(
request,
context["job_general_job"],
context["job_general_cluster"],
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
return True
class SelectHadoopPluginAction(t_flows.SelectPluginAction):
def __init__(self, request, *args, **kwargs):
super(SelectHadoopPluginAction, self).__init__(request,
*args,
**kwargs)
self.fields["job_id"] = forms.ChoiceField(
label=_("Plugin name"),
initial=request.GET.get("job_id") or request.POST.get("job_id"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_configs"] = forms.ChoiceField(
label=_("Job configs"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_args"] = forms.ChoiceField(
label=_("Job args"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_params"] = forms.ChoiceField(
label=_("Job params"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
self.fields["job_execution_id"] = forms.ChoiceField(
label=_("Job Execution ID"),
initial=request.REQUEST.get("job_execution_id"),
widget=forms.HiddenInput(
attrs={"class": "hidden_create_field"}))
job_ex_id = request.REQUEST.get("job_execution_id")
job_configs = (
saharaclient.job_execution_get(request,
job_ex_id).job_configs)
if "configs" in job_configs:
self.fields["job_configs"].initial = (
json.dumps(job_configs["configs"]))
if "params" in job_configs:
self.fields["job_params"].initial = (
json.dumps(job_configs["params"]))
if "args" in job_configs:
self.fields["job_args"].initial = (
json.dumps(job_configs["args"]))
class Meta:
name = _("Select plugin and hadoop version for cluster")
help_text_template = ("project/data_processing.clusters/"
"_create_general_help.html")
class SelectHadoopPlugin(workflows.Step):
action_class = SelectHadoopPluginAction
class ChosePluginVersion(workflows.Workflow):
slug = "lunch_job"
name = _("Launch Job")
finalize_button_name = _("Create")
success_message = _("Created")
failure_message = _("Could not create")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectHadoopPlugin,)
class LaunchJobNewCluster(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.jobs:index"
default_steps = (ClusterGeneralConfig,
JobExecutionGeneralConfig,
JobConfig)
def handle(self, request, context):
node_groups = None
plugin, hadoop_version = (
whelpers.get_plugin_and_hadoop_version(request))
ct_id = context["cluster_general_cluster_template"] or None
user_keypair = context["cluster_general_keypair"] or None
try:
cluster = saharaclient.cluster_create(
request,
context["cluster_general_cluster_name"],
plugin, hadoop_version,
cluster_template_id=ct_id,
default_image_id=context["cluster_general_image"],
description=context["cluster_general_description"],
node_groups=node_groups,
user_keypair_id=user_keypair,
is_transient=not(context["cluster_general_persist_cluster"]),
net_id=context.get(
"cluster_general_neutron_management_network",
None))
except Exception:
exceptions.handle(request,
_("Unable to create new cluster for job."))
return False
try:
saharaclient.job_execution_create(
request,
context["job_general_job"],
cluster.id,
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
except Exception:
exceptions.handle(request,
_("Unable to launch job."))
return False
return True
| apache-2.0 | -9,050,693,190,965,560,000 | 35.25 | 79 | 0.575565 | false |
apiaryio/snowcrash | tools/gyp/pylib/gyp/xcode_emulation.py | 3 | 66631 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depends on the version of Xcode used.
  All versions prior to Xcode 5.0, and Xcode 5.1 or later, use
  $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added in Xcode 5.0
  and deprecated with Xcode 5.1.
  For "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
  64-bit architecture as part of $(ARCHS_STANDARD) and default to only
  building it.
  For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
  of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
  are also part of $(ARCHS_STANDARD).
  All those rules are coded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_platform_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
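# Illustrative example: a configuration named 'Release-iphoneos' with
#   {'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer'}
# ends up with {'CODE_SIGN_IDENTITY': 'iPhone Developer'}; in any other
# configuration the conditional key is simply dropped (unsupported
# conditional keys additionally print a warning).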
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest()
def _IsXCTest(self):
return int(self.spec.get('mac_xctest_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library',) or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcrun', '--sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _XcodePlatformPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root not in XcodeSettings._platform_path_cache:
platform_path = self._GetSdkVersionInfoItem(sdk_root,
'--show-sdk-platform-path')
XcodeSettings._platform_path_cache[sdk_root] = platform_path
return XcodeSettings._platform_path_cache[sdk_root]
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
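# Illustrative example: with {'IPHONEOS_DEPLOYMENT_TARGET': '7.0'} in the
# settings and an iphoneos SDKROOT this appends '-miphoneos-version-min=7.0';
# with an iphonesimulator SDKROOT it appends
# '-mios-simulator-version-min=7.0' instead.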
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
# In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
# llvm-gcc. It also requires a fairly recent libtool, and
# if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
# path to the libLTO.dylib that matches the used clang.
if self._Test('LLVM_LTO', 'YES', default='NO'):
cflags.append('-flto')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
platform_root = self._XcodePlatformPath(configname)
if platform_root and self._IsXCTest():
cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
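# Sketch of typical output (illustrative; the flags depend entirely on the
# xcode_settings in the gyp file): for a config with GCC_OPTIMIZATION_LEVEL
# '0', MACOSX_DEPLOYMENT_TARGET '10.8' and a single x86_64 arch, GetCflags()
# returns something like ['-fasm-blocks', '-mpascal-strings', '-O0',
# '-gdwarf-2', '-mmacosx-version-min=10.8', '-arch x86_64'].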
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't map c++0x to c++11 here, so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
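# Illustrative example: with OTHER_CFLAGS ['-DFOO'] and OTHER_CPLUSPLUSFLAGS
# ['$(inherited)', '-fno-exceptions'] in xcode_settings, the '$(inherited)'
# entry is replaced by the OTHER_CFLAGS value, so the returned list ends
# with ['-DFOO', '-fno-exceptions'].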
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
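# Illustrative example:
#   _StandardizePath('@loader_path/../Frameworks/./Foo')
#   # -> '@loader_path/../Frameworks/Foo'
# The '@loader_path' prefix is preserved and only the rest is normpath()ed.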
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
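# Illustrative example: with a gyp_to_build_path that maps 'foo.order' to
# '../../foo.order',
#   _MapLinkerFlagFilename('-Wl,-exported_symbols_list,foo.order', ...)
#   # -> '-Wl,-exported_symbols_list,../../foo.order'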
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
platform_root = self._XcodePlatformPath(configname)
if sdk_root and platform_root and self._IsXCTest():
ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
ldflags.append('-framework XCTest')
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
if XcodeVersion()[0] < '0900':
ldflags.append('-lpkstart')
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
else:
ldflags.append('-e _NSExtensionMain')
ldflags.append('-fapplication-extension')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
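# Sketch of typical output (illustrative): for a shared_library named 'foo'
# with DYLIB_INSTALL_NAME_BASE '/usr/local/lib' and a single x86_64 arch,
# GetLdflags() contains entries such as '-arch x86_64', '-L<product_dir>'
# and '-install_name /usr/local/lib/libfoo.dylib'.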
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
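# Illustrative example: if the 'Debug' config sets {'A': '1', 'B': '2'} and
# the 'Release' config sets {'A': '1', 'B': '3'}, GetPerTargetSettings()
# returns {'A': '1'} -- only keys whose value is identical across all
# configurations survive.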
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
and self._IsBundle()):
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and
(self.spec['type'] == 'executable' or self._IsXCTest())):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
# Xcode 7 started shipping with ".tbd" (text based stub) files instead of
# ".dylib" files, without providing real support for them. What it does for
# "/usr/lib" libraries is pass "-L/usr/lib -lname", which depends on the
# library order and causes collisions when building Chrome.
#
# Instead, substitute ".tbd" for ".dylib" in the generated project when both
# of the following conditions are true:
# - the library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
# - the ".dylib" file does not exist but a ".tbd" file does.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
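# Illustrative example:
#   AdjustLibraries(['Cocoa.framework', 'libcrypto.dylib',
#                    '$(SDKROOT)/usr/lib/libz.dylib'])
#   # -> ['-framework Cocoa', '-lcrypto', '<sdk path>/usr/lib/libz.dylib']
#   # (or the matching .tbd stub if only that exists in the SDK).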
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
compiler = self.xcode_settings[configname].get('GCC_VERSION')
if compiler is not None:
cache['DTCompiler'] = compiler
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version')
cache['DTSDKName'] = sdk_root + (sdk_version or '')
if xcode >= '0720':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, '--show-sdk-build-version')
elif xcode >= '0430':
cache['DTSDKBuild'] = sdk_version
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['MinimumOSVersion'] = self.xcode_settings[configname].get(
'IPHONEOS_DEPLOYMENT_TARGET')
cache['DTPlatformName'] = sdk_root
cache['DTPlatformVersion'] = sdk_version
if configname.endswith("iphoneos"):
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
cache['DTPlatformBuild'] = cache['DTSDKBuild']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
# This is weird, but Xcode sets DTPlatformBuild to an empty field
# for simulator builds.
cache['DTPlatformBuild'] = ""
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
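# Usage sketch (illustrative): for a target with GCC_PREFIX_HEADER set and
# GCC_PRECOMPILE_PREFIX_HEADER left at 'NO', GetInclude('c') simply returns
# '-include <path to the prefix header>'. With precompilation enabled it
# points at the per-language compiled header instead, and
# GetPchBuildCommands() describes how to build the four .gch files.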
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
Raises |GypError| if the command returns with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
xcode_settings dict, the local key gets precendence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \
(int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
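# Illustrative example: for a bundle whose resource folder is
# 'Foo.app/Contents/Resources', the resource 'app/res/en.lproj/Main.xib'
# yields the pair
#   ('<product_dir>/Foo.app/Contents/Resources/en.lproj/Main.nib',
#    'app/res/en.lproj/Main.xib').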
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_path_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_FRAMEWORKS_DIR' : built_products_dir,
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
'XCODE_VERSION_ACTUAL' : XcodeVersion()[0],
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if XcodeVersion()[0] >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
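# Illustrative example:
#   _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}')
#   # -> '${FOO}/${BAR}/${BAZ}'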
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If a variable expands to something that references
another variable, that variable is expanded as well if it's in expansions --
until no variables present in expansions are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
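# Illustrative example: with expansions
#   [('PRODUCT_NAME', 'App'), ('WRAPPER_NAME', '${PRODUCT_NAME}.app')]
# (already topologically sorted, as GetSortedXcodeEnv produces),
#   ExpandEnvVars('$(WRAPPER_NAME)/Info.plist', expansions)
#   # -> 'App.app/Info.plist'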
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
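# Illustrative example: for env = {'A': '${B}/x', 'B': 'y'},
# _TopologicallySortedEnvVarKeys(env) returns ['B', 'A'], so that a variable
# is listed before any variable that refers to it.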
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
| mit | -8,256,173,839,986,919,000 | 39.042668 | 191 | 0.662379 | false |
ewindisch/nova | nova/tests/api/openstack/compute/plugins/v3/test_user_data.py | 17 | 8671 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.api.openstack.compute.plugins.v3 import user_data
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-user-data',
'osapi_v3')
self.no_user_data_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
user_data.ATTRIBUTE_NAME: None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
if no_image:
server.pop('image_ref', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller:
server = override_controller.create(req, body=body).obj['server']
else:
server = self.controller.create(req, body=body).obj['server']
def test_create_instance_with_user_data_disabled(self):
params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('user_data', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_user_data_controller)
def test_create_instance_with_user_data_enabled(self):
params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIn('user_data', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_user_data(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/flavors/3'
value = "A random string"
body = {
'server': {
'name': 'user_data_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
user_data.ATTRIBUTE_NAME: base64.b64encode(value),
},
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_with_bad_user_data(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/flavors/3'
value = "A random string"
body = {
'server': {
'name': 'user_data_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
user_data.ATTRIBUTE_NAME: value,
},
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body=body)
| apache-2.0 | 2,905,124,806,532,589,000 | 36.375 | 78 | 0.586092 | false |
imsparsh/python-for-android | python-build/python-libs/gdata/build/lib/gdata/docs/service.py | 133 | 22521 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
DocumentAclQuery: Queries a Google Document Acl feed.
"""
__author__ = ('api.jfisher (Jeff Fisher), '
'e.bidelman (Eric Bidelman)')
import re
import atom
import gdata.service
import gdata.docs
import urllib
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind'
DOCUMENT_LABEL = 'document'
SPREADSHEET_LABEL = 'spreadsheet'
PRESENTATION_LABEL = 'presentation'
FOLDER_LABEL = 'folder'
PDF_LABEL = 'pdf'
LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels'
STARRED_LABEL_TERM = LABEL_SCHEME + '#starred'
TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed'
HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden'
MINE_LABEL_TERM = LABEL_SCHEME + '#mine'
PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private'
SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain'
VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed'
FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/'
# File extensions of documents that are permitted to be uploaded or downloaded.
SUPPORTED_FILETYPES = {
'CSV': 'text/csv',
'TSV': 'text/tab-separated-values',
'TAB': 'text/tab-separated-values',
'DOC': 'application/msword',
'DOCX': ('application/vnd.openxmlformats-officedocument.'
'wordprocessingml.document'),
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ODT': 'application/vnd.oasis.opendocument.text',
'RTF': 'application/rtf',
'SXW': 'application/vnd.sun.xml.writer',
'TXT': 'text/plain',
'XLS': 'application/vnd.ms-excel',
'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'PDF': 'application/pdf',
'PNG': 'image/png',
'PPT': 'application/vnd.ms-powerpoint',
'PPS': 'application/vnd.ms-powerpoint',
'HTM': 'text/html',
'HTML': 'text/html',
'ZIP': 'application/zip',
'SWF': 'application/x-shockwave-flash'
}
class DocsService(gdata.service.GDataService):
"""Client extension for the Google Documents service Document List feed."""
__FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)')
__RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)(.*)$')
def __init__(self, email=None, password=None, source=None,
server='docs.google.com', additional_headers=None, **kwargs):
"""Creates a client for the Google Documents service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'docs.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='writely', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def _MakeKindCategory(self, label):
if label is None:
return None
return atom.Category(scheme=DATA_KIND_SCHEME,
term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label)
def _MakeContentLinkFromId(self, resource_id):
match = self.__RESOURCE_ID_PATTERN.match(resource_id)
label = match.group(1)
doc_id = match.group(3)
if label == DOCUMENT_LABEL:
return '/feeds/download/documents/Export?docId=%s' % doc_id
if label == PRESENTATION_LABEL:
return '/feeds/download/presentations/Export?docId=%s' % doc_id
if label == SPREADSHEET_LABEL:
return ('http://spreadsheets.google.com/feeds/download/spreadsheets/'
'Export?key=%s' % doc_id)
raise ValueError, 'Invalid resource id: %s' % resource_id
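  # For example, a (hypothetical) resource id such as 'document:abc123'
  # maps to '/feeds/download/documents/Export?docId=abc123'.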
def _UploadFile(self, media_source, title, category, folder_or_uri=None):
"""Uploads a file to the Document List feed.
Args:
media_source: A gdata.MediaSource object containing the file to be
uploaded.
title: string The title of the document on the server after being
uploaded.
category: An atom.Category object specifying the appropriate document
type.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
entry = gdata.docs.DocumentListEntry()
entry.title = atom.Title(text=title)
if category is not None:
entry.category.append(category)
entry = self.Post(entry, uri, media_source=media_source,
extra_headers={'Slug': media_source.file_name},
converter=gdata.docs.DocumentListEntryFromString)
return entry
def _DownloadFile(self, uri, file_path):
"""Downloads a file.
Args:
uri: string The full Export URL to download the file from.
file_path: string The full path to save the file to.
Raises:
RequestError: on error response from server.
"""
server_response = self.request('GET', uri)
response_body = server_response.read()
if server_response.status != 200:
raise gdata.service.RequestError, {'status': server_response.status,
'reason': server_response.reason,
'body': response_body}
f = open(file_path, 'wb')
f.write(response_body)
f.flush()
f.close()
def MoveIntoFolder(self, source_entry, folder_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
folder_entry: DocumentListEntry An object with a link to the destination
folder.
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
entry = gdata.docs.DocumentListEntry()
entry.id = source_entry.id
entry = self.Post(entry, folder_entry.content.src,
converter=gdata.docs.DocumentListEntryFromString)
return entry
def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
"""Queries the Document List feed and returns the resulting feed of
entries.
Args:
uri: string The full URI to be queried. This can contain query
parameters, a hostname, or simply the relative path to a Document
List feed. The DocumentQuery object is useful when constructing
query parameters.
converter: func (optional) A function which will be executed on the
retrieved item, generally to render it into a Python object.
By default the DocumentListFeedFromString function is used to
return a DocumentListFeed object. This is because most feed
queries will result in a feed and not a single entry.
"""
return self.Get(uri, converter=converter)
def QueryDocumentListFeed(self, uri):
"""Retrieves a DocumentListFeed by retrieving a URI based off the Document
List feed, including any query parameters. A DocumentQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
A DocumentListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
def GetDocumentListEntry(self, uri):
"""Retrieves a particular DocumentListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
def GetDocumentListFeed(self, uri=None):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string A full URI to query the Document List feed.
"""
if not uri:
uri = gdata.docs.service.DocumentQuery().ToUri()
return self.QueryDocumentListFeed(uri)
def GetDocumentListAclEntry(self, uri):
"""Retrieves a particular DocumentListAclEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListAclEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString)
def GetDocumentListAclFeed(self, uri):
"""Retrieves a feed containing all of a user's documents.
Args:
uri: string The URI of a document's Acl feed to retrieve.
Returns:
A DocumentListAclFeed object representing the ACL feed
returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString)
def Upload(self, media_source, title, folder_or_uri=None, label=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
label: optional label describing the type of the document to be created.
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(media_source, title, self._MakeKindCategory(label),
folder_or_uri)
def Download(self, entry_or_id_or_url, file_path, export_format=None,
gid=None, extra_params=None):
"""Downloads a document from the Document List.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to.
export_format: the format to convert to, if conversion is required.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry):
url = entry_or_id_or_url.content.src
else:
if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url):
url = self._MakeContentLinkFromId(entry_or_id_or_url)
else:
url = entry_or_id_or_url
if export_format is not None:
if url.find('/Export?') == -1:
        raise gdata.service.Error, 'This entry cannot be exported as a different format'
url += '&exportFormat=%s' % export_format
if gid is not None:
if url.find('spreadsheets') == -1:
        raise gdata.service.Error, 'grid id parameter is not valid for this entry'
url += '&gid=%s' % gid
if extra_params:
url += '&' + urllib.urlencode(extra_params)
self._DownloadFile(url, file_path)
def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None):
"""Downloads a document from the Document List in a different format.
Args:
entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry,
or a url to download from (such as the content src).
file_path: string The full path to save the file to. The export
          format is inferred from the file extension.
gid: grid id, for downloading a single grid of a spreadsheet
extra_params: a map of any further parameters to control how the document
is downloaded
Raises:
RequestError if the service does not respond with success
"""
ext = None
match = self.__FILE_EXT_PATTERN.match(file_path)
if match:
ext = match.group(1)
self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params)
def CreateFolder(self, title, folder_or_uri=None):
"""Creates a folder in the Document List feed.
Args:
title: string The title of the folder on the server after being created.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the folder created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except AttributeError:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
folder_entry = gdata.docs.DocumentListEntry()
folder_entry.title = atom.Title(text=title)
folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
folder_entry = self.Post(folder_entry, uri,
converter=gdata.docs.DocumentListEntryFromString)
return folder_entry
def MoveOutOfFolder(self, source_entry):
"""Moves a document into a folder in the Document List Feed.
Args:
source_entry: DocumentListEntry An object representing the source
document/folder.
Returns:
True if the entry was moved out.
"""
return self.Delete(source_entry.GetEditLink().href)
# Deprecated methods
@atom.deprecated('Please use Upload instead')
def UploadPresentation(self, media_source, title, folder_or_uri=None):
"""Uploads a presentation inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a
presentation file to be uploaded.
title: string The title of the presentation on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the presentation created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(PRESENTATION_LABEL),
folder_or_uri=folder_or_uri)
@atom.deprecated('Please use Upload instead')
def UploadSpreadsheet(self, media_source, title, folder_or_uri=None):
"""Uploads a spreadsheet inside of a MediaSource object to the Document
List feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The MediaSource object containing a spreadsheet
file to be uploaded.
title: string The title of the spreadsheet on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the spreadsheet created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL),
folder_or_uri=folder_or_uri)
@atom.deprecated('Please use Upload instead')
def UploadDocument(self, media_source, title, folder_or_uri=None):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
This method is deprecated, use Upload instead.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created
on the Google Documents service.
"""
return self._UploadFile(
media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
folder_or_uri=folder_or_uri)
"""Calling any of these functions is the same as calling Export"""
DownloadDocument = atom.deprecated('Please use Export instead')(Export)
DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
"""Calling any of these functions is the same as calling MoveIntoFolder"""
MoveDocumentIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MovePresentationIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveSpreadsheetIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
MoveFolderIntoFolder = atom.deprecated(
'Please use MoveIntoFolder instead')(MoveIntoFolder)
class DocumentQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Document List feed"""
def __init__(self, feed='/feeds/documents', visibility='private',
projection='full', text_query=None, params=None,
categories=None):
"""Constructor for Document List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current feed.
projection: string (optional) The projection chosen for the current feed.
text_query: string (optional) The contents of the q query parameter. This
string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.visibility = visibility
self.projection = projection
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.visibility, self.projection])
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
def AddNamedFolder(self, email, folder_name):
"""Adds a named folder category, qualified by a schema.
This function lets you query for documents that are contained inside a
named folder without fear of collision with other categories.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was added to the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.append(category)
return category
def RemoveNamedFolder(self, email, folder_name):
"""Removes a named folder category, qualified by a schema.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
      The string of the category that was removed from the object.
"""
category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
self.categories.remove(category)
return category
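def _example_document_query():
  """Illustrative sketch, not part of the original library: shows how a
  DocumentQuery is typically assembled. The 'title' parameters and the 'mine'
  category are assumed example values.
  """
  query = DocumentQuery(params={'title': 'budget', 'title-exact': 'true'})
  query.categories.append('mine')
  # The URI is rooted at '/feeds/documents/private/full', with the category
  # and query parameters appended by ToUri().
  return query.ToUri()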
class DocumentAclQuery(gdata.service.Query):
"""Object used to construct a URI to query a Document's ACL feed"""
def __init__(self, resource_id, feed='/feeds/acl/private/full'):
"""Constructor for Document ACL Query
Args:
resource_id: string The resource id. (e.g. 'document%3Adocument_id',
'spreadsheet%3Aspreadsheet_id', etc.)
feed: string (optional) The path for the feed.
(e.g. '/feeds/acl/private/full')
Yields:
A DocumentAclQuery object used to construct a URI based on the Document
ACL feed.
"""
self.resource_id = resource_id
gdata.service.Query.__init__(self, feed)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
ACL feed.
"""
return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id)
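# Illustrative sketch (the resource id is an assumed example): the ACL feed URI
# is built by appending the resource id to the feed path, so
# DocumentAclQuery('document%3Adoc_id').ToUri() should yield
# '/feeds/acl/private/full/document%3Adoc_id'.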
| apache-2.0 | -2,228,777,188,269,581,000 | 36.472546 | 79 | 0.682208 | false |
thisisshi/cloud-custodian | tools/c7n_gcp/c7n_gcp/output.py | 2 | 6276 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
TODO: provider policy execution initialization for outputs
"""
import datetime
import logging
import time
try:
from google.cloud.storage import Bucket, Client as StorageClient
except ImportError:
Bucket, StorageClient = None, None
try:
from google.cloud.logging import Client as LogClient
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging.resource import Resource
except ImportError:
LogClient = None
from c7n.output import (
blob_outputs,
log_outputs,
metrics_outputs,
BlobOutput,
Metrics,
LogOutput)
from c7n.utils import local_session
@metrics_outputs.register('gcp')
class StackDriverMetrics(Metrics):
METRICS_PREFIX = 'custom.googleapis.com/custodian/policy'
DESCRIPTOR_COMMON = {
        'metricKind': 'GAUGE',
'labels': [{
'key': 'policy',
'valueType': 'STRING',
'description': 'Custodian Policy'}],
}
METRICS_DESCRIPTORS = {
'resourcecount': {
'type': '{}/{}'.format(METRICS_PREFIX, 'resourcecount'),
'valueType': 'INT64',
'units': 'items',
'description': 'Number of resources that matched the given policy',
'displayName': 'Resources',
},
'resourcetime': {
'type': '{}/{}'.format(METRICS_PREFIX, 'resourcetime'),
'valueType': 'DOUBLE',
'units': 's',
'description': 'Time to query the resources for a given policy',
'displayName': 'Query Time',
},
'actiontime': {
'type': '{}/{}'.format(METRICS_PREFIX, 'actiontime'),
'valueType': 'DOUBLE',
'units': 's',
'description': 'Time to perform actions for a given policy',
'displayName': 'Action Time',
},
}
# Custom metrics docs https://tinyurl.com/y8rrghwc
log = logging.getLogger('c7n_gcp.metrics')
def __init__(self, ctx, config=None):
super(StackDriverMetrics, self).__init__(ctx, config)
self.project_id = local_session(self.ctx.session_factory).get_default_project()
self.write_metrics_project_id = self.config.get('project_id', self.project_id)
def initialize(self):
"""One time initialization of metrics descriptors.
        # tbd - unclear if this is adding significant value.
"""
client = local_session(self.ctx.session_factory).client(
'monitoring', 'v3', 'projects.metricDescriptors')
descriptor_map = {
n['type'].rsplit('/', 1)[-1]: n for n in client.execute_command('list', {
'name': 'projects/%s' % self.project_id,
'filter': 'metric.type=startswith("{}")'.format(self.METRICS_PREFIX)}).get(
'metricsDescriptors', [])}
created = False
for name in self.METRICS_DESCRIPTORS:
if name in descriptor_map:
continue
created = True
md = self.METRICS_DESCRIPTORS[name]
md.update(self.DESCRIPTOR_COMMON)
client.execute_command(
'create', {'name': 'projects/%s' % self.project_id, 'body': md})
if created:
self.log.info("Initializing StackDriver Metrics Descriptors")
time.sleep(5)
def _format_metric(self, key, value, unit, dimensions):
        # Resource is a Google-controlled vocabulary with artificial
        # limitations on resource type; there's not much useful we can
        # utilize.
now = datetime.datetime.utcnow()
metrics_series = {
'metric': {
'type': 'custom.googleapis.com/custodian/policy/%s' % key.lower(),
'labels': {
'policy': self.ctx.policy.name,
'project_id': self.project_id
},
},
'metricKind': 'GAUGE',
'valueType': 'INT64',
'resource': {
'type': 'global',
},
'points': [{
'interval': {
'endTime': now.isoformat('T') + 'Z',
'startTime': now.isoformat('T') + 'Z'},
'value': {'int64Value': int(value)}}]
}
return metrics_series
def _put_metrics(self, ns, metrics):
session = local_session(self.ctx.session_factory)
client = session.client('monitoring', 'v3', 'projects.timeSeries')
params = {'name': "projects/{}".format(self.write_metrics_project_id),
'body': {'timeSeries': metrics}}
client.execute_command('create', params)
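def _example_time_series(policy_name='my-policy', project_id='my-project',
                         value=3):
    """Illustrative sketch, not part of Cloud Custodian itself: builds the
    same timeSeries payload shape that StackDriverMetrics._format_metric
    emits. The policy name, project id and value are assumed example inputs.
    """
    now = datetime.datetime.utcnow().isoformat('T') + 'Z'
    return {
        'metric': {
            'type': 'custom.googleapis.com/custodian/policy/resourcecount',
            'labels': {'policy': policy_name, 'project_id': project_id},
        },
        'metricKind': 'GAUGE',
        'valueType': 'INT64',
        'resource': {'type': 'global'},
        'points': [{
            'interval': {'endTime': now, 'startTime': now},
            'value': {'int64Value': int(value)},
        }],
    }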
@log_outputs.register('gcp', condition=bool(LogClient))
class StackDriverLogging(LogOutput):
def get_log_group(self):
log_group = self.config.netloc
if log_group:
log_group = "custodian-%s-%s" % (log_group, self.ctx.policy.name)
else:
log_group = "custodian-%s" % self.ctx.policy.name
return log_group
def get_handler(self):
# TODO drop these grpc variants for the REST versions, and we can drop
# protobuf/grpc deps, and also so we can record tests.
log_group = self.get_log_group()
project_id = local_session(self.ctx.session_factory).get_default_project()
client = LogClient(project_id)
return CloudLoggingHandler(
client,
log_group,
labels={
'policy': self.ctx.policy.name,
'resource': self.ctx.policy.resource_type},
resource=Resource(type='project', labels={'project_id': project_id}))
def leave_log(self):
super(StackDriverLogging, self).leave_log()
# Flush and stop the background thread
self.handler.transport.flush()
self.handler.transport.worker.stop()
@blob_outputs.register('gs', condition=bool(StorageClient))
class GCPStorageOutput(BlobOutput):
def __init__(self, ctx, config=None):
super().__init__(ctx, config)
self.bucket = Bucket(StorageClient(), self.bucket)
def upload_file(self, path, key):
blob = self.bucket.blob(key)
blob.upload_from_filename(path)
| apache-2.0 | 7,073,328,727,557,018,000 | 33.295082 | 91 | 0.579031 | false |
tomspur/shedskin | shedskin/lib/socket.py | 6 | 2243 | # Copyright 2005-2011 Mark Dufour and contributors; License Expat (See LICENSE)
# model for module socket for shed skin
# from python 2.5.1 documentation
SHUT_RD=0
SHUT_WR=1
SHUT_RDWR=2
SOL_IP=0
SOL_SOCKET=1
SO_REUSEADDR=2
AI_PASSIVE=1
AF_UNIX=1
AF_INET=2
AF_INET6=10
IP_TOS=1
SOCK_STREAM=1
SOCK_DGRAM=2
SOMAXCONN=128
INADDR_ANY=0
INADDR_BROADCAST=0xffffffff
INADDR_NONE=0xffffffff
INADDR_LOOPBACK=0x7f000001
class error(Exception): pass
class herror(Exception): pass
class gaierror(Exception): pass
class timeout(Exception): pass
class socket(object):
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0):
pass
def accept(self):
return (socket(), ('', 1) )
def fileno(self):
return 0
def makefile(self, flags=None):
return file('', flags)
def listen(self, backlog):
return self
def shutdown(self, how):
return self
def close(self):
return self
# setblocking(0) == settimeout(0.0)
# setblocking(1) == settimeout(None)
def setblocking(self, flag):
return self
def settimeout(self, value):
return self
def gettimeout(self):
return 0.0
def setsockopt(self, level, optname, value):
return self
def getsockopt(self, level, optname, value=0):
return ''
def bind(self, address):
return self
def connect(self, address):
return self
def recv(self, bufsize, flags=0):
return ''
def send(self, string, flags=0):
return 0
def sendall(self, string, flags=0):
pass
def getsockname(self):
return ('', 0)
def getpeername(self):
return ('', 0)
def recvfrom(self, bufsize, flags=0):
return ('', ('', 0))
def sendto(self, bufsize, flags=0, address=0):
return 0
def getfqdn(host):
return ''
def gethostname():
return ''
def gethostbyname(hostname):
return ''
def ntohs(x):
return 0
def htons(x):
return 0
def ntohl(x):
return 0
def htonl(x):
return 0
def inet_aton(x):
return ''
def inet_ntoa(x):
return ''
def has_ipv6():
return True
def getdefaulttimeout():
return 0.0
def setdefaulttimeout(x):
pass
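# Illustrative sketch, not part of the original model: a minimal client that
# exercises the modelled API with concrete types, the kind of code Shed Skin
# would analyse against this module. The host and port are assumed values.
def example_client():
    s = socket(AF_INET, SOCK_STREAM)
    s.settimeout(5.0)
    s.connect(('localhost', 8080))
    s.send('ping')
    reply = s.recv(1024)
    s.close()
    return reply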
| gpl-3.0 | 1,241,705,058,179,799,300 | 15.253623 | 79 | 0.620597 | false |
sbesson/openmicroscopy | components/tools/OmeroWeb/test/integration/test_thumbnails.py | 2 | 7336 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests rendering of thumbnails."""
from future import standard_library
from builtins import range
import base64
import json
from omero.model import EllipseI, LengthI, LineI, \
PointI, PolygonI, PolylineI, RectangleI, RoiI
from omero.model.enums import UnitsLength
from omero.rtypes import rstring, rint, rdouble
from omeroweb.testlib import IWebTest
from omeroweb.testlib import get
from io import BytesIO
import pytest
from django.core.urlresolvers import reverse
try:
from PIL import Image
except Exception:
import Image
standard_library.install_aliases()
def rgba_to_int(red, green, blue, alpha=255):
"""Return the color as an Integer in RGBA encoding."""
return int.from_bytes([red, green, blue, alpha],
byteorder='big', signed=True)
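# Worked example: opaque yellow is rgba_to_int(255, 255, 0), i.e. the bytes
# ff ff 00 ff read as a signed big-endian integer, which equals -65281.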
class TestThumbnails(IWebTest):
"""Tests loading of thumbnails."""
@pytest.mark.parametrize("size", [None, 100])
def test_default_thumb_size(self, size):
"""
Test that the default size of thumbnails is correct.
Default size is 96.
"""
# Create a square image
iId = self.create_test_image(size_x=125, size_y=125,
session=self.sf).getId().getValue()
args = [iId]
if size is not None:
args.append(size)
request_url = reverse('webgateway_render_thumbnail', args=args)
rsp = get(self.django_client, request_url)
thumb = Image.open(BytesIO(rsp.content))
# Should be 96 on both sides
if size is None:
assert thumb.size == (96, 96)
else:
assert thumb.size == (size, size)
@pytest.mark.parametrize("size", [None, 100])
def test_base64_thumb(self, size):
"""
        Test base64 encoded retrieval of single thumbnail
"""
# Create a square image
iid = self.create_test_image(size_x=256, size_y=256,
session=self.sf).id.val
args = [iid]
if size is not None:
args.append(size)
request_url = reverse('webgateway_render_thumbnail', args=args)
rsp = get(self.django_client, request_url)
thumb = json.dumps(
"data:image/jpeg;base64,%s" %
base64.b64encode(rsp.content).decode("utf-8"))
request_url = reverse('webgateway_get_thumbnail_json',
args=args)
b64rsp = get(self.django_client, request_url).content.decode("utf-8")
assert thumb == b64rsp
def test_base64_thumb_set(self):
"""
        Test base64 encoded retrieval of thumbnails in a batch
"""
# Create a square image
images = []
for i in range(2, 5):
iid = self.create_test_image(size_x=64*i, size_y=64*i,
session=self.sf).id.val
images.append(iid)
expected_thumbs = {}
for i in images:
request_url = reverse('webgateway_render_thumbnail',
args=[i])
rsp = get(self.django_client, request_url)
expected_thumbs[i] = \
"data:image/jpeg;base64,%s" % \
base64.b64encode(rsp.content).decode("utf-8")
iids = {'id': images}
request_url = reverse('webgateway_get_thumbnails_json')
b64rsp = get(self.django_client, request_url, iids).content
json_data = json.loads(b64rsp)
for i in images:
assert json_data[str(i)] == expected_thumbs[i]
class TestRoiThumbnails(IWebTest):
def shapes(self):
"""Create a bunch of unsaved Shapes."""
rect = RectangleI()
rect.x = rdouble(10)
rect.y = rdouble(20)
rect.width = rdouble(30)
rect.height = rdouble(40)
rect.textValue = rstring("test-Rectangle")
rect.fillColor = rint(rgba_to_int(255, 255, 255, 255))
rect.strokeColor = rint(rgba_to_int(255, 255, 0, 255))
ellipse = EllipseI()
ellipse.x = rdouble(33)
ellipse.y = rdouble(44)
ellipse.radiusX = rdouble(55)
ellipse.radiusY = rdouble(66)
ellipse.textValue = rstring("test-Ellipse")
line = LineI()
line.x1 = rdouble(20)
line.x2 = rdouble(30)
line.y1 = rdouble(40)
line.y2 = rdouble(50)
line.textValue = rstring("test-Line")
point = PointI()
point.x = rdouble(50)
point.y = rdouble(50)
point.textValue = rstring("test-Point")
polygon = PolygonI()
polygon.fillColor = rint(rgba_to_int(255, 0, 255, 50))
polygon.strokeColor = rint(rgba_to_int(255, 255, 0))
polygon.strokeWidth = LengthI(10, UnitsLength.PIXEL)
points = "10,20, 50,150, 100,100, 150,75"
polygon.points = rstring(points)
polyline = PolylineI()
polyline.points = rstring(points)
return [rect, ellipse, line, point, polygon, polyline]
@pytest.mark.parametrize("theT", [1, 0])
@pytest.mark.parametrize("theZ", [0, 1])
def test_roi_thumbnail(self, theT, theZ):
update_service = self.sf.getUpdateService()
img = self.create_test_image(size_x=125, size_y=125, size_z=2,
size_t=2, session=self.sf)
for s in self.shapes():
if theT is not None:
s.theT = rint(theT)
if theZ is not None:
s.theZ = rint(theZ)
roi = RoiI()
roi.addShape(s)
roi.setImage(img)
roi = update_service.saveAndReturnObject(roi)
shape = roi.copyShapes()[0]
# Test ROI thumbnail...
request_url = reverse('webgateway_render_roi_thumbnail',
kwargs={'roiId': roi.id.val})
rsp = get(self.django_client, request_url)
thumb_bytes = BytesIO(rsp.content)
try:
thumb = Image.open(thumb_bytes)
finally:
thumb_bytes.close()
assert thumb.size == (250, 166)
# and Shape thumbnail...
request_url = reverse('webgateway_render_shape_thumbnail',
kwargs={'shapeId': shape.id.val})
rsp = get(self.django_client, request_url)
thumb_bytes = BytesIO(rsp.content)
try:
thumb = Image.open(thumb_bytes)
finally:
thumb_bytes.close()
assert thumb.size == (250, 166)
| gpl-2.0 | -7,647,566,067,223,346,000 | 34.100478 | 77 | 0.582606 | false |
KublaikhanGeek/scrapy | scrapy/xlib/tx/endpoints.py | 164 | 41184 | # -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
This also implements an extensible mini-language for describing endpoints,
parsed by the L{clientFromString} and L{serverFromString} functions.
@since: 10.1
"""
from __future__ import division, absolute_import
import os
import socket
from zope.interface import implementer, directlyProvides
import warnings
from twisted.internet import interfaces, defer, error, fdesc, threads
from twisted.internet.protocol import (
ClientFactory, Protocol, ProcessProtocol, Factory)
from twisted.internet.interfaces import IStreamServerEndpointStringParser
from twisted.internet.interfaces import IStreamClientEndpointStringParser
from twisted.python.filepath import FilePath
from twisted.python.failure import Failure
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.plugin import IPlugin, getPlugins
from twisted.internet import stdio
from .interfaces import IFileDescriptorReceiver
__all__ = ["TCP4ClientEndpoint", "SSL4ServerEndpoint"]
class _WrappingProtocol(Protocol):
"""
Wrap another protocol in order to notify my user when a connection has
been made.
"""
def __init__(self, connectedDeferred, wrappedProtocol):
"""
@param connectedDeferred: The L{Deferred} that will callback
with the C{wrappedProtocol} when it is connected.
@param wrappedProtocol: An L{IProtocol} provider that will be
connected.
"""
self._connectedDeferred = connectedDeferred
self._wrappedProtocol = wrappedProtocol
for iface in [interfaces.IHalfCloseableProtocol,
IFileDescriptorReceiver]:
if iface.providedBy(self._wrappedProtocol):
directlyProvides(self, iface)
def logPrefix(self):
"""
Transparently pass through the wrapped protocol's log prefix.
"""
if interfaces.ILoggingContext.providedBy(self._wrappedProtocol):
return self._wrappedProtocol.logPrefix()
return self._wrappedProtocol.__class__.__name__
def connectionMade(self):
"""
Connect the C{self._wrappedProtocol} to our C{self.transport} and
callback C{self._connectedDeferred} with the C{self._wrappedProtocol}
"""
self._wrappedProtocol.makeConnection(self.transport)
self._connectedDeferred.callback(self._wrappedProtocol)
def dataReceived(self, data):
"""
Proxy C{dataReceived} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.dataReceived(data)
def fileDescriptorReceived(self, descriptor):
"""
Proxy C{fileDescriptorReceived} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.fileDescriptorReceived(descriptor)
def connectionLost(self, reason):
"""
Proxy C{connectionLost} calls to our C{self._wrappedProtocol}
"""
return self._wrappedProtocol.connectionLost(reason)
def readConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.readConnectionLost()
def writeConnectionLost(self):
"""
Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
C{self._wrappedProtocol}
"""
self._wrappedProtocol.writeConnectionLost()
class _WrappingFactory(ClientFactory):
"""
Wrap a factory in order to wrap the protocols it builds.
@ivar _wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol
method will be called and whose resulting protocol will be wrapped.
@ivar _onConnection: A L{Deferred} that fires when the protocol is
connected
@ivar _connector: A L{connector <twisted.internet.interfaces.IConnector>}
that is managing the current or previous connection attempt.
"""
protocol = _WrappingProtocol
def __init__(self, wrappedFactory):
"""
@param wrappedFactory: A provider of I{IProtocolFactory} whose
buildProtocol method will be called and whose resulting protocol
will be wrapped.
"""
self._wrappedFactory = wrappedFactory
self._onConnection = defer.Deferred(canceller=self._canceller)
def startedConnecting(self, connector):
"""
A connection attempt was started. Remember the connector which started
said attempt, for use later.
"""
self._connector = connector
def _canceller(self, deferred):
"""
The outgoing connection attempt was cancelled. Fail that L{Deferred}
with an L{error.ConnectingCancelledError}.
@param deferred: The L{Deferred <defer.Deferred>} that was cancelled;
should be the same as C{self._onConnection}.
@type deferred: L{Deferred <defer.Deferred>}
@note: This relies on startedConnecting having been called, so it may
seem as though there's a race condition where C{_connector} may not
have been set. However, using public APIs, this condition is
impossible to catch, because a connection API
(C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a
L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to
C{connect()}'s caller.
@return: C{None}
"""
deferred.errback(
error.ConnectingCancelledError(
self._connector.getDestination()))
self._connector.stopConnecting()
def doStart(self):
"""
Start notifications are passed straight through to the wrapped factory.
"""
self._wrappedFactory.doStart()
def doStop(self):
"""
Stop notifications are passed straight through to the wrapped factory.
"""
self._wrappedFactory.doStop()
def buildProtocol(self, addr):
"""
Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback
the C{self._onConnection} L{Deferred}.
@return: An instance of L{_WrappingProtocol} or C{None}
"""
try:
proto = self._wrappedFactory.buildProtocol(addr)
except:
self._onConnection.errback()
else:
return self.protocol(self._onConnection, proto)
def clientConnectionFailed(self, connector, reason):
"""
Errback the C{self._onConnection} L{Deferred} when the
client connection fails.
"""
if not self._onConnection.called:
self._onConnection.errback(reason)
@implementer(interfaces.ITransport)
class _ProcessEndpointTransport(proxyForInterface(
interfaces.IProcessTransport, '_process')):
"""
An L{ITransport} provider for the L{IProtocol} instance passed to the
process endpoint.
@ivar _process: An active process transport which will be used by write
methods on this object to write data to a child process.
@type _process: L{interfaces.IProcessTransport} provider
"""
def write(self, data):
"""
Write to the child process's standard input.
@param data: The data to write on stdin.
"""
self._process.writeToChild(0, data)
def writeSequence(self, data):
"""
Write a list of strings to child process's stdin.
@param data: The list of chunks to write on stdin.
"""
for chunk in data:
self._process.writeToChild(0, chunk)
@implementer(interfaces.IStreamServerEndpoint)
class _TCPServerEndpoint(object):
"""
A TCP server endpoint interface
"""
def __init__(self, reactor, port, backlog, interface):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to
@type interface: str
"""
self._reactor = reactor
self._port = port
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a TCP
socket
"""
return defer.execute(self._reactor.listenTCP,
self._port,
protocolFactory,
backlog=self._backlog,
interface=self._interface)
class TCP4ServerEndpoint(_TCPServerEndpoint):
"""
Implements TCP server endpoint with an IPv4 configuration
"""
def __init__(self, reactor, port, backlog=50, interface=''):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to, defaults to '' (all)
@type interface: str
"""
_TCPServerEndpoint.__init__(self, reactor, port, backlog, interface)
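def _exampleTCPServer(reactor):
    """
    Illustrative sketch, not part of the original module: listening with a
    L{TCP4ServerEndpoint}. The port number and the trivial echo protocol are
    assumptions for demonstration only.
    """
    class Echo(Protocol):
        def dataReceived(self, data):
            self.transport.write(data)
    factory = Factory()
    factory.protocol = Echo
    endpoint = TCP4ServerEndpoint(reactor, 8007, backlog=10)
    # Returns a Deferred that fires with an IListeningPort provider.
    return endpoint.listen(factory)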
class TCP6ServerEndpoint(_TCPServerEndpoint):
"""
Implements TCP server endpoint with an IPv6 configuration
"""
def __init__(self, reactor, port, backlog=50, interface='::'):
"""
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
        @param interface: The hostname to bind to, defaults to '::' (all)
@type interface: str
"""
_TCPServerEndpoint.__init__(self, reactor, port, backlog, interface)
@implementer(interfaces.IStreamClientEndpoint)
class TCP4ClientEndpoint(object):
"""
TCP client endpoint with an IPv4 configuration.
"""
def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorTCP} provider
@param host: A hostname, used when connecting
@type host: str
@param port: The port number, used when connecting
@type port: int
@param timeout: The number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param bindAddress: A (host, port) tuple of local address to bind to,
or None.
@type bindAddress: tuple
"""
self._reactor = reactor
self._host = host
self._port = port
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via TCP.
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectTCP(
self._host, self._port, wf,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
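def _exampleTCPClient(reactor):
    """
    Illustrative sketch, not part of the original module: connecting a simple
    protocol through a L{TCP4ClientEndpoint}. The host, port and greeting are
    assumptions for demonstration only.
    """
    class Greeter(Protocol):
        def connectionMade(self):
            self.transport.write(b'hello\r\n')
    factory = Factory()
    factory.protocol = Greeter
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 8007, timeout=10)
    # Returns a Deferred that fires with the connected Greeter instance.
    return endpoint.connect(factory)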
@implementer(interfaces.IStreamServerEndpoint)
class SSL4ServerEndpoint(object):
"""
SSL secured TCP server endpoint with an IPv4 configuration.
"""
def __init__(self, reactor, port, sslContextFactory,
backlog=50, interface=''):
"""
@param reactor: An L{IReactorSSL} provider.
@param port: The port number used for listening
@type port: int
@param sslContextFactory: An instance of
L{twisted.internet.ssl.ContextFactory}.
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to, defaults to '' (all)
@type interface: str
"""
self._reactor = reactor
self._port = port
self._sslContextFactory = sslContextFactory
self._backlog = backlog
self._interface = interface
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
TCP socket.
"""
return defer.execute(self._reactor.listenSSL, self._port,
protocolFactory,
contextFactory=self._sslContextFactory,
backlog=self._backlog,
interface=self._interface)
@implementer(interfaces.IStreamClientEndpoint)
class SSL4ClientEndpoint(object):
"""
SSL secured TCP client endpoint with an IPv4 configuration
"""
def __init__(self, reactor, host, port, sslContextFactory,
timeout=30, bindAddress=None):
"""
@param reactor: An L{IReactorSSL} provider.
@param host: A hostname, used when connecting
@type host: str
@param port: The port number, used when connecting
@type port: int
@param sslContextFactory: SSL Configuration information as an instance
of L{twisted.internet.ssl.ContextFactory}.
@param timeout: Number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param bindAddress: A (host, port) tuple of local address to bind to,
or None.
@type bindAddress: tuple
"""
self._reactor = reactor
self._host = host
self._port = port
self._sslContextFactory = sslContextFactory
self._timeout = timeout
self._bindAddress = bindAddress
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect with SSL over
TCP.
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectSSL(
self._host, self._port, wf, self._sslContextFactory,
timeout=self._timeout, bindAddress=self._bindAddress)
return wf._onConnection
except:
return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class UNIXServerEndpoint(object):
"""
UnixSocket server endpoint.
"""
def __init__(self, reactor, address, backlog=50, mode=0o666, wantPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param address: The path to the Unix socket file, used when listening
@param backlog: number of connections to allow in backlog.
@param mode: mode to set on the unix socket. This parameter is
deprecated. Permissions should be set on the directory which
contains the UNIX socket.
@param wantPID: If True, create a pidfile for the socket.
"""
self._reactor = reactor
self._address = address
self._backlog = backlog
self._mode = mode
self._wantPID = wantPID
def listen(self, protocolFactory):
"""
Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
"""
return defer.execute(self._reactor.listenUNIX, self._address,
protocolFactory,
backlog=self._backlog,
mode=self._mode,
wantPID=self._wantPID)
@implementer(interfaces.IStreamClientEndpoint)
class UNIXClientEndpoint(object):
"""
UnixSocket client endpoint.
"""
def __init__(self, reactor, path, timeout=30, checkPID=0):
"""
@param reactor: An L{IReactorUNIX} provider.
@param path: The path to the Unix socket file, used when connecting
@type path: str
@param timeout: Number of seconds to wait before assuming the
connection has failed.
@type timeout: int
@param checkPID: If True, check for a pid file to verify that a server
is listening.
@type checkPID: bool
"""
self._reactor = reactor
self._path = path
self._timeout = timeout
self._checkPID = checkPID
def connect(self, protocolFactory):
"""
Implement L{IStreamClientEndpoint.connect} to connect via a
UNIX Socket
"""
try:
wf = _WrappingFactory(protocolFactory)
self._reactor.connectUNIX(
self._path, wf,
timeout=self._timeout,
checkPID=self._checkPID)
return wf._onConnection
except:
return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class AdoptedStreamServerEndpoint(object):
"""
An endpoint for listening on a file descriptor initialized outside of
Twisted.
@ivar _used: A C{bool} indicating whether this endpoint has been used to
listen with a factory yet. C{True} if so.
"""
_close = os.close
_setNonBlocking = staticmethod(fdesc.setNonBlocking)
def __init__(self, reactor, fileno, addressFamily):
"""
@param reactor: An L{IReactorSocket} provider.
@param fileno: An integer file descriptor corresponding to a listening
I{SOCK_STREAM} socket.
@param addressFamily: The address family of the socket given by
C{fileno}.
"""
self.reactor = reactor
self.fileno = fileno
self.addressFamily = addressFamily
self._used = False
def listen(self, factory):
"""
Implement L{IStreamServerEndpoint.listen} to start listening on, and
then close, C{self._fileno}.
"""
if self._used:
return defer.fail(error.AlreadyListened())
self._used = True
try:
self._setNonBlocking(self.fileno)
port = self.reactor.adoptStreamPort(
self.fileno, self.addressFamily, factory)
self._close(self.fileno)
except:
return defer.fail()
return defer.succeed(port)
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
        to L{TCP4ServerEndpoint}).
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to True and False
respectively. See the C{wantPID} argument to C{listenUNIX}
@return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorUNIX.listenUNIX} (or, modulo argument 2, the factory,
        arguments to L{UNIXServerEndpoint}).
"""
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
sslmethod=None, interface='', backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@param privateKey: The file name of a PEM format private key file.
@type privateKey: C{str}
@param certKey: The file name of a PEM format certificate file.
@type certKey: C{str}
@param sslmethod: The string name of an SSL method, based on the name of a
constant in C{OpenSSL.SSL}. Must be one of: "SSLv23_METHOD",
"SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD".
@type sslmethod: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments
        to L{SSL4ServerEndpoint}).
"""
from twisted.internet import ssl
if certKey is None:
certKey = privateKey
kw = {}
if sslmethod is not None:
kw['method'] = getattr(ssl.SSL, sslmethod)
else:
kw['method'] = ssl.SSL.SSLv23_METHOD
certPEM = FilePath(certKey).getContent()
keyPEM = FilePath(privateKey).getContent()
privateCertificate = ssl.PrivateCertificate.loadPEM(certPEM + keyPEM)
cf = ssl.CertificateOptions(
privateKey=privateCertificate.privateKey.original,
certificate=privateCertificate.original,
**kw
)
return ((int(port), factory, cf),
{'interface': interface, 'backlog': int(backlog)})
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _StandardIOParser(object):
"""
Stream server endpoint string parser for the Standard I/O type.
@ivar prefix: See L{IStreamClientEndpointStringParser.prefix}.
"""
prefix = "stdio"
def _parseServer(self, reactor):
"""
Internal parser function for L{_parseServer} to convert the string
arguments into structured arguments for the L{StandardIOEndpoint}
@param reactor: Reactor for the endpoint
"""
return StandardIOEndpoint(reactor)
def parseStreamServer(self, reactor, *args, **kwargs):
# Redirects to another function (self._parseServer), tricks zope.interface
# into believing the interface is correctly implemented.
return self._parseServer(reactor)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _TCP6ServerParser(object):
"""
Stream server endpoint string parser for the TCP6ServerEndpoint type.
@ivar prefix: See L{IStreamClientEndpointStringParser.prefix}.
"""
prefix = "tcp6" # Used in _parseServer to identify the plugin with the endpoint type
def _parseServer(self, reactor, port, backlog=50, interface='::'):
"""
Internal parser function for L{_parseServer} to convert the string
arguments into structured arguments for the L{TCP6ServerEndpoint}
@param reactor: An L{IReactorTCP} provider.
@param port: The port number used for listening
@type port: int
@param backlog: Size of the listen queue
@type backlog: int
@param interface: The hostname to bind to
@type interface: str
"""
port = int(port)
backlog = int(backlog)
return TCP6ServerEndpoint(reactor, port, backlog, interface)
def parseStreamServer(self, reactor, *args, **kwargs):
# Redirects to another function (self._parseServer), tricks zope.interface
# into believing the interface is correctly implemented.
return self._parseServer(reactor, *args, **kwargs)
_serverParsers = {"tcp": _parseTCP,
"unix": _parseUNIX,
"ssl": _parseSSL,
}
_OP, _STRING = range(2)
def _tokenize(description):
"""
Tokenize a strports string and yield each token.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: an iterable of 2-tuples of (L{_OP} or L{_STRING}, string). Tuples
starting with L{_OP} will contain a second element of either ':' (i.e.
'next parameter') or '=' (i.e. 'assign parameter value'). For example,
the string 'hello:greet\=ing=world' would result in a generator
yielding these values::
_STRING, 'hello'
_OP, ':'
_STRING, 'greet=ing'
_OP, '='
_STRING, 'world'
"""
current = ''
ops = ':='
nextOps = {':': ':=', '=': ':'}
description = iter(description)
for n in description:
if n in ops:
yield _STRING, current
yield _OP, n
current = ''
ops = nextOps[n]
elif n == '\\':
current += next(description)
else:
current += n
yield _STRING, current
def _parse(description):
"""
Convert a description string into a list of positional and keyword
parameters, using logic vaguely like what Python does.
@param description: a string as described by L{serverFromString} or
L{clientFromString}.
@return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
all C{str}s which do contain an '='. For example, the result of
C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
"""
args, kw = [], {}
def add(sofar):
if len(sofar) == 1:
args.append(sofar[0])
else:
kw[sofar[0]] = sofar[1]
sofar = ()
for (type, value) in _tokenize(description):
if type is _STRING:
sofar += (value,)
elif value == ':':
add(sofar)
sofar = ()
add(sofar)
return args, kw
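# Illustrative sketch (not part of the original module): what _parse yields
# for a couple of description strings; note that a backslash-escaped colon
# survives as a literal character in the parsed value.
def _exampleParseUsage():
    args, kw = _parse("tcp:8080:interface=127.0.0.1")
    assert args == ["tcp", "8080"]
    assert kw == {"interface": "127.0.0.1"}
    args, kw = _parse("unix:address=foo\\:bar")
    assert args == ["unix"]
    assert kw == {"address": "foo:bar"}
    return args, kw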
# Mappings from description "names" to endpoint constructors.
_endpointServerFactories = {
'TCP': TCP4ServerEndpoint,
'SSL': SSL4ServerEndpoint,
'UNIX': UNIXServerEndpoint,
}
_endpointClientFactories = {
'TCP': TCP4ClientEndpoint,
'SSL': SSL4ClientEndpoint,
'UNIX': UNIXClientEndpoint,
}
_NO_DEFAULT = object()
def _parseServer(description, factory, default=None):
"""
    Parse a strports description into a 3-tuple of a parser name (or plugin),
    arguments, and keyword values.
@param description: A description in the format explained by
L{serverFromString}.
@type description: C{str}
@param factory: A 'factory' argument; this is left-over from
twisted.application.strports, it's not really used.
@type factory: L{IProtocolFactory} or L{None}
@param default: Deprecated argument, specifying the default parser mode to
use for unqualified description strings (those which do not have a ':'
and prefix).
@type default: C{str} or C{NoneType}
@return: a 3-tuple of (plugin or name, arguments, keyword arguments)
"""
args, kw = _parse(description)
if not args or (len(args) == 1 and not kw):
deprecationMessage = (
"Unqualified strport description passed to 'service'."
"Use qualified endpoint descriptions; for example, 'tcp:%s'."
% (description,))
if default is None:
default = 'tcp'
warnings.warn(
deprecationMessage, category=DeprecationWarning, stacklevel=4)
elif default is _NO_DEFAULT:
raise ValueError(deprecationMessage)
# If the default has been otherwise specified, the user has already
# been warned.
args[0:0] = [default]
endpointType = args[0]
parser = _serverParsers.get(endpointType)
if parser is None:
# If the required parser is not found in _server, check if
# a plugin exists for the endpointType
for plugin in getPlugins(IStreamServerEndpointStringParser):
if plugin.prefix == endpointType:
return (plugin, args[1:], kw)
raise ValueError("Unknown endpoint type: '%s'" % (endpointType,))
return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
def _serverFromStringLegacy(reactor, description, default):
"""
Underlying implementation of L{serverFromString} which avoids exposing the
deprecated 'default' argument to anything but L{strports.service}.
"""
nameOrPlugin, args, kw = _parseServer(description, None, default)
if type(nameOrPlugin) is not str:
plugin = nameOrPlugin
return plugin.parseStreamServer(reactor, *args, **kw)
else:
name = nameOrPlugin
# Chop out the factory.
args = args[:1] + args[2:]
return _endpointServerFactories[name](reactor, *args, **kw)
def serverFromString(reactor, description):
"""
Construct a stream server endpoint from an endpoint description string.
The format for server endpoint descriptions is a simple string. It is a
prefix naming the type of endpoint, then a colon, then the arguments for
that endpoint.
For example, you can call it like this to create an endpoint that will
listen on TCP port 80::
serverFromString(reactor, "tcp:80")
Additional arguments may be specified as keywords, separated with colons.
For example, you can specify the interface for a TCP server endpoint to
bind to like this::
serverFromString(reactor, "tcp:80:interface=127.0.0.1")
SSL server endpoints may be specified with the 'ssl' prefix, and the
private key and certificate files may be specified by the C{privateKey} and
C{certKey} arguments::
serverFromString(reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")
If a private key file name (C{privateKey}) isn't provided, a "server.pem"
file is assumed to exist which contains the private key. If the certificate
file name (C{certKey}) isn't provided, the private key file is assumed to
contain the certificate as well.
You may escape colons in arguments with a backslash, which you will need to
use if you want to specify a full pathname argument on Windows::
serverFromString(reactor,
"ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")
    Finally, the 'unix' prefix may be used to specify a filesystem UNIX socket,
optionally with a 'mode' argument to specify the mode of the socket file
created by C{listen}::
serverFromString(reactor, "unix:/var/run/finger")
serverFromString(reactor, "unix:/var/run/finger:mode=660")
This function is also extensible; new endpoint types may be registered as
L{IStreamServerEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The server endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to listen with the parameters
        given by C{description}.
@rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}
@raise ValueError: when the 'description' string cannot be parsed.
@since: 10.2
"""
return _serverFromStringLegacy(reactor, description, _NO_DEFAULT)
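# Illustrative sketch (not part of the original module): one minimal way to
# use serverFromString; the Echo protocol below is hypothetical and exists
# only for this example.
def _exampleServerFromString(reactor):
    from twisted.internet.protocol import Factory, Protocol

    class Echo(Protocol):
        def dataReceived(self, data):
            # Send every received byte straight back to the peer.
            self.transport.write(data)

    factory = Factory()
    factory.protocol = Echo
    endpoint = serverFromString(reactor, "tcp:8080:interface=127.0.0.1")
    # listen() returns a Deferred that fires with the listening port.
    return endpoint.listen(factory)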
def quoteStringArgument(argument):
"""
Quote an argument to L{serverFromString} and L{clientFromString}. Since
arguments are separated with colons and colons are escaped with
backslashes, some care is necessary if, for example, you have a pathname,
you may be tempted to interpolate into a string like this::
serverFromString("ssl:443:privateKey=%s" % (myPathName,))
This may appear to work, but will have portability issues (Windows
pathnames, for example). Usually you should just construct the appropriate
endpoint type rather than interpolating strings, which in this case would
be L{SSL4ServerEndpoint}. There are some use-cases where you may need to
generate such a string, though; for example, a tool to manipulate a
configuration file which has strports descriptions in it. To be correct in
those cases, do this instead::
serverFromString("ssl:443:privateKey=%s" %
(quoteStringArgument(myPathName),))
@param argument: The part of the endpoint description string you want to
pass through.
@type argument: C{str}
@return: The quoted argument.
@rtype: C{str}
"""
return argument.replace('\\', '\\\\').replace(':', '\\:')
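# Illustrative sketch (not part of the original module): backslashes are
# doubled first and colons escaped second, so a Windows-style path quotes as
# shown below.
def _exampleQuoteStringArgument():
    quoted = quoteStringArgument("C:\\key.pem")
    assert quoted == "C\\:\\\\key.pem"
    return "ssl:443:privateKey=%s" % (quoted,)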
def _parseClientTCP(*args, **kwargs):
"""
Perform any argument value coercion necessary for TCP client parameters.
Valid positional arguments to this function are host and port.
Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
arguments.
@return: The coerced values as a C{dict}.
"""
if len(args) == 2:
kwargs['port'] = int(args[1])
kwargs['host'] = args[0]
elif len(args) == 1:
if 'host' in kwargs:
kwargs['port'] = int(args[0])
else:
kwargs['host'] = args[0]
try:
kwargs['port'] = int(kwargs['port'])
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
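# Illustrative sketch (not part of the original module): positional host/port
# and string keyword values are coerced to the types connectTCP expects.
def _exampleParseClientTCP():
    kwargs = _parseClientTCP("www.example.com", "80", timeout="30")
    assert kwargs == {"host": "www.example.com", "port": 80, "timeout": 30}
    return kwargs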
def _loadCAsFromDir(directoryPath):
"""
Load certificate-authority certificate objects in a given directory.
@param directoryPath: a L{FilePath} pointing at a directory to load .pem
files from.
@return: a C{list} of L{OpenSSL.crypto.X509} objects.
"""
from twisted.internet import ssl
caCerts = {}
for child in directoryPath.children():
        if child.basename().split('.')[-1].lower() != 'pem':
continue
try:
data = child.getContent()
except IOError:
# Permission denied, corrupt disk, we don't care.
continue
try:
theCert = ssl.Certificate.loadPEM(data)
except ssl.SSL.Error:
# Duplicate certificate, invalid certificate, etc. We don't care.
pass
else:
caCerts[theCert.digest()] = theCert.original
return caCerts.values()
def _parseClientSSL(*args, **kwargs):
"""
Perform any argument value coercion necessary for SSL client parameters.
Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
arguments except for C{contextFactory}. Instead, C{certKey} (the path name
    of the certificate file) and C{privateKey} (the path name of the private key
associated with the certificate) are accepted and used to construct a
context factory.
Valid positional arguments to this function are host and port.
@param caCertsDir: The one parameter which is not part of
L{IReactorSSL.connectSSL}'s signature, this is a path name used to
construct a list of certificate authority certificates. The directory
will be scanned for files ending in C{.pem}, all of which will be
considered valid certificate authorities for this connection.
@type caCertsDir: C{str}
@return: The coerced values as a C{dict}.
"""
from twisted.internet import ssl
kwargs = _parseClientTCP(*args, **kwargs)
certKey = kwargs.pop('certKey', None)
privateKey = kwargs.pop('privateKey', None)
caCertsDir = kwargs.pop('caCertsDir', None)
if certKey is not None:
certx509 = ssl.Certificate.loadPEM(
FilePath(certKey).getContent()).original
else:
certx509 = None
if privateKey is not None:
privateKey = ssl.PrivateCertificate.loadPEM(
FilePath(privateKey).getContent()).privateKey.original
else:
privateKey = None
if caCertsDir is not None:
verify = True
caCerts = _loadCAsFromDir(FilePath(caCertsDir))
else:
verify = False
caCerts = None
kwargs['sslContextFactory'] = ssl.CertificateOptions(
method=ssl.SSL.SSLv23_METHOD,
certificate=certx509,
privateKey=privateKey,
verify=verify,
caCerts=caCerts
)
return kwargs
def _parseClientUNIX(*args, **kwargs):
"""
Perform any argument value coercion necessary for UNIX client parameters.
Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
and has the same meaning. Also C{path} is used instead of C{address}.
Valid positional arguments to this function are C{path}.
@return: The coerced values as a C{dict}.
"""
if len(args) == 1:
kwargs['path'] = args[0]
try:
kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
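# Illustrative sketch (not part of the original module): the strports
# 'lockfile' argument is translated into the boolean 'checkPID' argument of
# IReactorUNIX.connectUNIX.
def _exampleParseClientUNIX():
    kwargs = _parseClientUNIX("/var/run/finger", lockfile="1", timeout="9")
    assert kwargs == {"path": "/var/run/finger",
                      "checkPID": True,
                      "timeout": 9}
    return kwargs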
_clientParsers = {
'TCP': _parseClientTCP,
'SSL': _parseClientSSL,
'UNIX': _parseClientUNIX,
}
def clientFromString(reactor, description):
"""
Construct a client endpoint from a description string.
Client description strings are much like server description strings,
although they take all of their arguments as keywords, aside from host and
port.
You can create a TCP client endpoint with the 'host' and 'port' arguments,
like so::
clientFromString(reactor, "tcp:host=www.example.com:port=80")
or, without specifying host and port keywords::
clientFromString(reactor, "tcp:www.example.com:80")
Or you can specify only one or the other, as in the following 2 examples::
clientFromString(reactor, "tcp:host=www.example.com:80")
clientFromString(reactor, "tcp:www.example.com:port=80")
or an SSL client endpoint with those arguments, plus the arguments used by
the server SSL, for a client certificate::
clientFromString(reactor, "ssl:web.example.com:443:"
"privateKey=foo.pem:certKey=foo.pem")
to specify your certificate trust roots, you can identify a directory with
PEM files in it with the C{caCertsDir} argument::
clientFromString(reactor, "ssl:host=web.example.com:port=443:"
"caCertsDir=/etc/ssl/certs")
You can create a UNIX client endpoint with the 'path' argument and optional
'lockfile' and 'timeout' arguments::
clientFromString(reactor, "unix:path=/var/foo/bar:lockfile=1:timeout=9")
or, with the path as a positional argument with or without optional
arguments as in the following 2 examples::
clientFromString(reactor, "unix:/var/foo/bar")
clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9")
This function is also extensible; new endpoint types may be registered as
L{IStreamClientEndpointStringParser} plugins. See that interface for more
information.
@param reactor: The client endpoint will be constructed with this reactor.
@param description: The strports description to parse.
@return: A new endpoint which can be used to connect with the parameters
        given by C{description}.
@rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}
@since: 10.2
"""
args, kwargs = _parse(description)
aname = args.pop(0)
name = aname.upper()
for plugin in getPlugins(IStreamClientEndpointStringParser):
if plugin.prefix.upper() == name:
return plugin.parseStreamClient(*args, **kwargs)
if name not in _clientParsers:
raise ValueError("Unknown endpoint type: %r" % (aname,))
kwargs = _clientParsers[name](*args, **kwargs)
return _endpointClientFactories[name](reactor, **kwargs)
def connectProtocol(endpoint, protocol):
"""
Connect a protocol instance to an endpoint.
This allows using a client endpoint without having to create a factory.
@param endpoint: A client endpoint to connect to.
@param protocol: A protocol instance.
@return: The result of calling C{connect} on the endpoint, i.e. a
L{Deferred} that will fire with the protocol when connected, or an
appropriate error.
"""
class OneShotFactory(Factory):
def buildProtocol(self, addr):
return protocol
return endpoint.connect(OneShotFactory())
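# Illustrative sketch (not part of the original module): pairing
# clientFromString with connectProtocol avoids writing a factory for a
# one-off connection; the Greeter protocol below is hypothetical.
def _exampleConnectProtocol(reactor):
    from twisted.internet.protocol import Protocol

    class Greeter(Protocol):
        def connectionMade(self):
            self.transport.write(b"hello\r\n")

    endpoint = clientFromString(reactor, "tcp:host=www.example.com:port=80")
    # Returns a Deferred that fires with the connected Greeter instance.
    return connectProtocol(endpoint, Greeter())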
| bsd-3-clause | 7,499,918,760,542,452,000 | 31.453901 | 92 | 0.642264 | false |
j717273419/ibus | ibus/common.py | 6 | 6133 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <[email protected]>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
__all__ = (
"IBUS_IFACE_IBUS",
"IBUS_SERVICE_IBUS",
"IBUS_PATH_IBUS",
"IBUS_IFACE_CONFIG",
"IBUS_IFACE_PANEL",
"IBUS_IFACE_ENGINE",
"IBUS_IFACE_ENGINE_FACTORY",
"IBUS_IFACE_INPUT_CONTEXT",
"IBUS_IFACE_NOTIFICATIONS",
"ORIENTATION_HORIZONTAL",
"ORIENTATION_VERTICAL",
"ORIENTATION_SYSTEM",
"BUS_NAME_FLAG_ALLOW_REPLACEMENT",
"BUS_NAME_FLAG_REPLACE_EXISTING",
"BUS_NAME_FLAG_DO_NOT_QUEUE",
"BUS_REQUEST_NAME_REPLY_PRIMARY_OWNER",
"BUS_REQUEST_NAME_REPLY_IN_QUEUE",
"BUS_REQUEST_NAME_REPLY_EXISTS",
"BUS_REQUEST_NAME_REPLY_ALREADY_OWNER",
"default_reply_handler",
"default_error_handler",
"DEFAULT_ASYNC_HANDLERS",
"CONFIG_GENERAL_SHORTCUT_TRIGGER_DEFAULT",
"CONFIG_GENERAL_SHORTCUT_ENABLE_DEFAULT",
"CONFIG_GENERAL_SHORTCUT_DISABLE_DEFAULT",
"CONFIG_GENERAL_SHORTCUT_NEXT_ENGINE_DEFAULT",
"CONFIG_GENERAL_SHORTCUT_PREV_ENGINE_DEFAULT",
"main",
"main_quit",
"main_iteration",
"get_address",
"get_socket_path",
)
import os
import sys
#from xdg import BaseDirectory
import ctypes
import _config
# __display = os.environ["DISPLAY"]
# __hostname, __display_screen = __display.split(":", 1)
# if not __hostname.strip():
# __hostname = "unix"
# __display_number = __display_screen.split(".")[0]
#
# __username = None
# try:
# __username = os.getlogin()
# except:
# pass
# if not __username:
# __username = os.getenv ("LOGNAME")
# if not __username:
# __username = os.getenv ("USER")
# if not __username:
# __username = os.getenv ("LNAME")
# if not __username:
# __username = os.getenv ("USERNAME")
# libibus = ctypes.CDLL("libibus.so")
# id = ctypes.c_char_p(libibus.ibus_get_local_machine_id()).value
#
# IBUS_SOCKET_FILE = os.path.join(BaseDirectory.xdg_config_home,
# "ibus", "bus",
# "%s-%s-%s"% (id, __hostname, __display_number))
# def get_address():
# libibus = ctypes.CDLL("libibus.so")
# address = ctypes.c_char_p(libibus.ibus_get_address()).value
# return address
#
# address = os.getenv("IBUS_ADDRESS")
# if address:
# return address
# try:
# for l in file(IBUS_SOCKET_FILE):
# if not l.startswith("IBUS_ADDRESS="):
# continue
# address = l[13:]
# address = address.strip()
# break
# except:
# return None
# return address
libibus = ctypes.CDLL(_config.LIBIBUS_SONAME)
get_address = libibus.ibus_get_address
get_address.restype=ctypes.c_char_p
get_socket_path = libibus.ibus_get_socket_path
get_socket_path.restype=ctypes.c_char_p
# __session_id = os.getenv ("IBUS_SESSION_ID")
#
# IBUS_ADDR = "unix:path=/tmp/ibus-%s%s/ibus-%s-%s" % (__username,
# "-" + __session_id if __session_id else "",
# __hostname,
# __display_number)
# IBUS_ADDR = "tcp:host=localhost,port=7799"
IBUS_IFACE_IBUS = "org.freedesktop.IBus"
IBUS_PATH_IBUS = "/org/freedesktop/IBus"
IBUS_SERVICE_IBUS = "org.freedesktop.IBus"
IBUS_IFACE_PANEL = "org.freedesktop.IBus.Panel"
IBUS_IFACE_CONFIG = "org.freedesktop.IBus.Config"
IBUS_IFACE_ENGINE = "org.freedesktop.IBus.Engine"
IBUS_IFACE_ENGINE_FACTORY = "org.freedesktop.IBus.Factory"
IBUS_IFACE_INPUT_CONTEXT = "org.freedesktop.IBus.InputContext"
IBUS_IFACE_NOTIFICATIONS = "org.freedesktop.IBus.Notifications"
# define pre-edit commit mode when the focus is lost
IBUS_ENGINE_PREEDIT_CLEAR = 0
IBUS_ENGINE_PREEDIT_COMMIT = 1
# define orientation
ORIENTATION_HORIZONTAL = 0
ORIENTATION_VERTICAL = 1
ORIENTATION_SYSTEM = 2
# define bus name flag
BUS_NAME_FLAG_ALLOW_REPLACEMENT = (1 << 0)
BUS_NAME_FLAG_REPLACE_EXISTING = (1 << 1)
BUS_NAME_FLAG_DO_NOT_QUEUE = (1 << 2)
# define bus request name reply
BUS_REQUEST_NAME_REPLY_PRIMARY_OWNER = 1
BUS_REQUEST_NAME_REPLY_IN_QUEUE = 2
BUS_REQUEST_NAME_REPLY_EXISTS = 3
BUS_REQUEST_NAME_REPLY_ALREADY_OWNER = 4
def default_reply_handler( *args):
pass
def default_error_handler(e):
print >> sys.stderr, e
DEFAULT_ASYNC_HANDLERS = {
"reply_handler" : default_reply_handler,
"error_handler" : default_error_handler
}
CONFIG_GENERAL_SHORTCUT_TRIGGER_DEFAULT = [
"Control+space",
"Zenkaku_Hankaku",
"Hangul"]
CONFIG_GENERAL_SHORTCUT_ENABLE_DEFAULT = []
CONFIG_GENERAL_SHORTCUT_DISABLE_DEFAULT = []
CONFIG_GENERAL_SHORTCUT_NEXT_ENGINE_DEFAULT = []
CONFIG_GENERAL_SHORTCUT_PREV_ENGINE_DEFAULT = []
__mainloop = None
def __init_main_loop():
global __mainloop
if __mainloop == None:
import gobject
__mainloop = gobject.MainLoop()
def main():
__init_main_loop()
__mainloop.run()
def main_quit():
global __mainloop
if __mainloop:
__mainloop.quit()
def main_iteration(may_block=False):
__init_main_loop()
return __mainloop.get_context().iteration(may_block)
| lgpl-2.1 | -3,775,140,702,536,878,600 | 30.777202 | 98 | 0.624327 | false |
SanchayanMaity/gem5 | src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py | 91 | 3586 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PSHUFD_XMM_XMM_I {
shuffle ufp1, xmmlm, xmmhm, size=4, ext="IMMEDIATE"
shuffle xmmh, xmmlm, xmmhm, size=4, ext="IMMEDIATE >> 4"
movfp xmml, ufp1, dataSize=8
};
def macroop PSHUFD_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
shuffle xmml, ufp1, ufp2, size=4, ext="IMMEDIATE"
shuffle xmmh, ufp1, ufp2, size=4, ext="IMMEDIATE >> 4"
};
def macroop PSHUFD_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
shuffle xmml, ufp1, ufp2, size=4, ext="IMMEDIATE"
shuffle xmmh, ufp1, ufp2, size=4, ext="IMMEDIATE >> 4"
};
def macroop PSHUFHW_XMM_XMM_I {
shuffle xmmh, xmmhm, xmmhm, size=2, ext=imm
};
def macroop PSHUFHW_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT + 8", dataSize=8
shuffle xmmh, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFHW_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT + 8", dataSize=8
shuffle xmmh, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFLW_XMM_XMM_I {
shuffle xmml, xmmlm, xmmlm, size=2, ext=imm
};
def macroop PSHUFLW_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
shuffle xmml, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFLW_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
shuffle xmml, ufp1, ufp1, size=2, ext=imm
};
'''
| bsd-3-clause | 8,709,310,040,235,131,000 | 39.292135 | 72 | 0.737869 | false |
lukeiwanski/tensorflow | tensorflow/contrib/ffmpeg/decode_video_op_test.py | 31 | 2478 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.decode_video_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import six # pylint: disable=unused-import
from tensorflow.contrib import ffmpeg
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeVideoOpTest(test.TestCase):
def _loadFileAndTest(self, filename, width, height, frames, bmp_filename,
index):
"""Loads an video file and validates the output tensor.
Args:
filename: The filename of the input file.
width: The width of the video.
height: The height of the video.
frames: The frames of the video.
bmp_filename: The filename for the bmp file.
index: Index location inside the video.
"""
with self.test_session():
path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
filename)
with open(path, 'rb') as f:
contents = f.read()
bmp_path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
bmp_filename)
with open(bmp_path, 'rb') as f:
bmp_contents = f.read()
image_op = image_ops.decode_bmp(bmp_contents)
image = image_op.eval()
self.assertEqual(image.shape, (height, width, 3))
video_op = ffmpeg.decode_video(contents)
video = video_op.eval()
self.assertEqual(video.shape, (frames, height, width, 3))
self.assertAllEqual(video[index, :, :, :], image)
def testMp4(self):
self._loadFileAndTest('small.mp4', 560, 320, 166, 'small_100.bmp', 99)
if __name__ == '__main__':
test.main()
| apache-2.0 | -7,127,018,586,020,921,000 | 34.913043 | 80 | 0.656174 | false |
rbrito/pkg-youtube-dl | youtube_dl/extractor/ceskatelevize.py | 12 | 10657 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
unescapeHTML,
update_url_query,
urlencode_postdata,
USER_AGENTS,
)
class CeskaTelevizeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/ivysilani/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
_TESTS = [{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '61924494877246241',
'ext': 'mp4',
'title': 'Hyde Park Civilizace: Život v Grónsku',
'description': 'md5:3fec8f6bb497be5cdb0c9e8781076626',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 3350,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.ceskatelevize.cz/ivysilani/10441294653-hyde-park-civilizace/215411058090502/bonus/20641-bonus-01-en',
'info_dict': {
'id': '61924494877028507',
'ext': 'mp4',
'title': 'Hyde Park Civilizace: Bonus 01 - En',
'description': 'English Subtittles',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 81.3,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# live stream
'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
'info_dict': {
'id': 402,
'ext': 'mp4',
'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'is_live': True,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'Georestricted to Czech Republic',
}, {
'url': 'http://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php?hash=d6a3e1370d2e4fa76296b90bad4dfc19673b641e&IDEC=217 562 22150/0004&channelID=1&width=100%25',
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
type_ = None
episode_id = None
playlist = self._parse_json(
self._search_regex(
r'getPlaylistUrl\(\[({.+?})\]', webpage, 'playlist',
default='{}'), playlist_id)
if playlist:
type_ = playlist.get('type')
episode_id = playlist.get('id')
if not type_:
type_ = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],',
webpage, 'type')
if not episode_id:
episode_id = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],',
webpage, 'episode_id')
data = {
'playlist[0][type]': type_,
'playlist[0][id]': episode_id,
'requestUrl': compat_urllib_parse_urlparse(url).path,
'requestSource': 'iVysilani',
}
entries = []
for user_agent in (None, USER_AGENTS['Safari']):
req = sanitized_Request(
'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=urlencode_postdata(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
req.add_header('X-Requested-With', 'XMLHttpRequest')
if user_agent:
req.add_header('User-Agent', user_agent)
req.add_header('Referer', url)
playlistpage = self._download_json(req, playlist_id, fatal=False)
if not playlistpage:
continue
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)
playlist_title = self._og_search_title(webpage, default=None)
playlist_description = self._og_search_description(webpage, default=None)
playlist = self._download_json(req, playlist_id, fatal=False)
if not playlist:
continue
playlist = playlist.get('playlist')
if not isinstance(playlist, list):
continue
playlist_len = len(playlist)
for num, item in enumerate(playlist):
is_live = item.get('type') == 'LIVE'
formats = []
for format_id, stream_url in item.get('streamUrls', {}).items():
if 'drmOnly=true' in stream_url:
continue
if 'playerType=flash' in stream_url:
stream_formats = self._extract_m3u8_formats(
stream_url, playlist_id, 'mp4', 'm3u8_native',
m3u8_id='hls-%s' % format_id, fatal=False)
else:
stream_formats = self._extract_mpd_formats(
stream_url, playlist_id,
mpd_id='dash-%s' % format_id, fatal=False)
# See https://github.com/ytdl-org/youtube-dl/issues/12119#issuecomment-280037031
if format_id == 'audioDescription':
for f in stream_formats:
f['source_preference'] = -10
formats.extend(stream_formats)
if user_agent and len(entries) == playlist_len:
entries[num]['formats'].extend(formats)
continue
item_id = item.get('id') or item['assetId']
title = item['title']
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')
subtitles = {}
if item.get('type') == 'VOD':
subs = item.get('subtitles')
if subs:
subtitles = self.extract_subtitles(episode_id, subs)
if playlist_len == 1:
final_title = playlist_title or title
if is_live:
final_title = self._live_title(final_title)
else:
final_title = '%s (%s)' % (playlist_title, title)
entries.append({
'id': item_id,
'title': final_title,
'description': playlist_description if playlist_len == 1 else None,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
'is_live': is_live,
})
for e in entries:
self._sort_formats(e['formats'])
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
def _get_subtitles(self, episode_id, subs):
original_subtitles = self._download_webpage(
subs[0]['url'], episode_id, 'Downloading subtitles')
srt_subs = self._fix_subtitles(original_subtitles)
return {
'cs': [{
'ext': 'srt',
'data': srt_subs,
}]
}
@staticmethod
def _fix_subtitles(subtitles):
""" Convert millisecond-based subtitles to SRT """
def _msectotimecode(msec):
""" Helper utility to convert milliseconds to timecode """
components = []
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
yield '{0} --> {1}'.format(start, stop)
else:
yield line
return '\r\n'.join(_fix_subtitle(subtitles))
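    # Illustrative note: _msectotimecode(61000) yields '00:01:01,000', so an
    # original line such as '12; 61000 64000' becomes the SRT cue
    #   12
    #   00:01:01,000 --> 00:01:04,000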
class CeskaTelevizePoradyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/porady/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
_TESTS = [{
# video with 18+ caution trailer
'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
'info_dict': {
'id': '215562210900007-bogotart',
'title': 'Queer: Bogotart',
'description': 'Alternativní průvodce současným queer světem',
},
'playlist': [{
'info_dict': {
'id': '61924494876844842',
'ext': 'mp4',
'title': 'Queer: Bogotart (Varování 18+)',
'duration': 10.2,
},
}, {
'info_dict': {
'id': '61924494877068022',
'ext': 'mp4',
'title': 'Queer: Bogotart (Queer)',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 1558.3,
},
}],
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# iframe embed
'url': 'http://www.ceskatelevize.cz/porady/10614999031-neviditelni/21251212048/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data_url = update_url_query(unescapeHTML(self._search_regex(
(r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?ceskatelevize\.cz/ivysilani/embed/iFramePlayer\.php.*?)\1'),
webpage, 'iframe player url', group='url')), query={
'autoStart': 'true',
})
return self.url_result(data_url, ie=CeskaTelevizeIE.ie_key())
| unlicense | -1,979,965,866,312,919,300 | 35.84083 | 173 | 0.498732 | false |
lsigithub/axxia_yocto_linux_4.1 | scripts/gdb/linux/symbols.py | 367 | 6324 | #
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
import string
from linux import modules, utils
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context so that we would
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
    The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
module_pattern = ".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string() : attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['module_core']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
            if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
[self.load_module_symbols(module) for module in module_list]
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = arg.split()
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if not self.breakpoint is None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
| gpl-2.0 | -3,188,376,766,379,216,400 | 34.728814 | 79 | 0.583017 | false |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/shadow/volume.py | 2 | 12772 | """Shadow-volume implementation
A volume is cast by a light from an edgeset, it's
basically the volume of space which is shadowed by
the given edgeset/object.
"""
from OpenGLContext.arrays import *
from OpenGL.GL import *
import weakref
class Volume( object ):
"""A shadow-volume object
This object represents the shadow cast by a single
light and a single occluder of that light. It is
rendered (along with all other volumes for a given
light) into the stencil buffer to determine what
parts of the scene are lit by the light.
XXX doesn't yet handle single-edges for faces or
    (more critically) clock-wise windings
"""
forwardIndices = ()
sideType = 0 # GL_TRIANGLES or GL_QUADS
backFaces = ()
edgeSet = None # weakref to edge-set
light = None # weakref to light
def __init__( self, edgeSet, sourceVector ):
"""Initialize the shadow volume
edgeSet -- pointer to the edge set from which we
retrieve most of our geometric data
sourceVector -- the homogenous coordinates of the
shadow-casting light.
"""
self.edgeSet = weakref.proxy( edgeSet )
self.sourceVector = sourceVector
self.calculate()
def calculate( self ):
"""Calculate the shadow-volume's shape
Returns segments*2*3 array where each item in the array
has the property that it defines a face of the shadow
volume like so:
A,B = segment (2 3-item arrays)
for point lights (Quads):
Ax,Ay,Az,1.0
Bx,By,Bz,1.0
Bx,By,Bz,0.0
Ax,Ay,Az,0.0
for directional lights:
Ax,Ay,Az,1.0
Bx,By,Bz,1.0
0,0,0,0
Which is fed into the "equation 14" or "equation 15"
of the article. Note that this is pre-calculated to not
require switching calculations on the face when doing
the later transformation to a shadow volume. (i.e. in
the article there are two different cases for
whether the "first" or "second" face is facing the lights,
I've folded them together).
need certain things when we're done:
set of light-facing-faces
set of rearward-facing-faces (projected to infinity)
set of edge-faces (silouhette projected to infinity)
# the first two are currently missing
# should do those as indices into the points array
"""
#sourceVector is the light position, with the fourth-item
#of the vector being 1.0 for point lights and 0.0
#for directional lights
sourceVector = self.sourceVector
positional = sourceVector[-1]
if positional:
# is a positional light
self.sideType = GL_QUADS
else:
self.sideType = GL_TRIANGLES
edges1 = self.singleEdges( sourceVector )
edges2 = self.doubleEdges( sourceVector )
self.shadowCaps( sourceVector )
# now compound the two sources together
# these are all now edges to be turned into
# faces for the sides of the shadow-volume
edges1 = concatenate( (edges1, edges2) )
# calculate the edge-faces here...
if self.sideType == GL_QUADS:
l = array( sourceVector[:3], 'd' )
points = zeros( (len(edges1)*4,4), 'd' )
points[1::4,:3] = edges1[:,1] # B
points[0::4,:3] = edges1[:,0] # A
# A.w and B.w are always one (in this code)
# so we can simplify a few of the equations...
points[3::4,:3] = (
edges1[:,0] * positional # A*l.w
-
l # l*A.w == l* 1.0 == l
)
points[2::4,:3] = (
edges1[:,1] * positional
-
l # B*l.w - l*B.w
)
points[0::4,3] = 1
points[1::4,3] = 1
else: # Triangles
l = - array( sourceVector, 'd' )
points = zeros( (len(edges1)*3,4), 'd' )
points[0::3,:3] = edges1[:,1] # B
points[1::3,:3] = edges1[:,0] # A
points[2::3,:] = l # A*l.w - l*A.w
points[0::3,3] = 1
points[1::3,3] = 1
self.edges = points
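    # Illustrative note: for a point light l = (0, 0, 5, 1) and an edge
    # A = (1, 0, 0), B = (0, 1, 0), the quad built above is
    #   (1, 0, 0, 1), (0, 1, 0, 1), (0, 1, -5, 0), (1, 0, -5, 0)
    # i.e. the two edge vertices plus the same vertices projected away from
    # the light to infinity (w == 0).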
def doubleEdges( self, sourceVector ):
"""Calculate double-face-edges for given sourceVector
Returns an Nx2x3 array of line-segment coordinates
"""
doubleEdges = self.edgeSet.doubleEdges
doubleVectors = self.edgeSet.doubleVectors
if not doubleEdges:
return zeros( (0,2,3),'d')
indices = arange(0,len(doubleVectors))
### Calculate the forward and backward-facing triangle-sets...
mults = greater(
dot(doubleVectors, sourceVector ),
0
)
#indices -> only those which are edges
# if one is and one isn't, then it's a silouhette edge
indices = nonzero(
equal(
sum(
mults,
1 # second axis
),
1 # everything that isn't 0 or 2 in this case
)
)[0] # just care about first dimension
# vectors is now just those which are edges...
vectors = take( doubleVectors, indices, 0 )
edges = take( doubleEdges, indices, 0 )
# mults gives a map which filters the doubleIndices value
# mults is now just those edges which are part of the silouhette
mults = take( mults, indices, 0 )
# the set of silouhette vertices where the second face faces...
vectors1 = compress( mults[:,1], edges,0 )
# the set of vertices where the first face faces...
vectors2 = compress( mults[:,0], edges,0 )
# these need to have their coord-order swapped to allow
# for uniform treatment...
a = vectors2[:,1::2][:]
b = vectors2[:,::2][:]
vectors2 = zeros(shape(vectors2),'d')
vectors2[:,1::2] = b
vectors2[:,::2] = a
# the vector-sets are now homogenous, so we concatenate and
# return the result
return concatenate((vectors2,vectors1))
def singleEdges( self, sourceVector ):
"""Calculate single-face-edges for given sourceVector
Returns an Nx2x3 array of line-segment coordinates
"""
# if the face is facing, then is an edge, otherwise not
singleEdges = self.edgeSet.singleEdges
singleVectors = self.edgeSet.singleVectors
if not singleVectors:
return zeros( (0,2,3),'d')
indices = nonzero(
greater(
dot(singleVectors, sourceVector ),
0
)
)
return take(
singleEdges,
indices,
0
)
def shadowCaps( self, sourceVector):
"""Calculate the shadow-volume caps
Forward cap is just indices into the points array
Backward cap requires a new points array
"""
### Calculate the forward/backward face-sets
directions = dot(self.edgeSet.planeEquations, sourceVector)
def expand( forwardIndices ):
"""Expand a set into point-indices from tri-indices"""
forwardIndices = repeat(forwardIndices,3,0)
forwardIndices[1::3] +=1
forwardIndices[2::3] +=2
return forwardIndices
self.forwardIndices = expand(nonzero(greater(directions,0))[0])
# backward is trickier, as we need to project to infinity
# from the light position
if sourceVector[-1]:
backwardIndices = expand(nonzero(less_equal(directions,0))[0])
### Now need to project backward with this equation:
## Vertex4f(V.x*L.w-L.x*V.w, V.y*L.w-L.y*V.w,V.z*L.w-L.z*V.w, 0);
## where V is the given vertex and L is our sourceVector
## and the source V.w is 1.0 (model-space)
## V.x *L.w - L.x,
L = array(sourceVector,'d')
V = take( self.edgeSet.points, backwardIndices,0 )
set = zeros((len(V),4),'d')
set[:,0:3] = (V[:,0:3]*L[-1])-L[0:3]
self.backwardPoints = set
def render( self, mode=None ):
"""Render the shadow-volume
"""
if mode.stencil:
# XXX these shouldn't be here, but we're making sure
# the state really is what we want during testing
if not self.edgeSet.ccw:
glFrontFace( GL_CW )
try:
if __debug__:
if mode.debugShadowNoStencil:
glStencilMask( 0 )
if not mode.debugShadowNoFrontFaces:
# now render front-facing polygons
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
glCullFace(GL_BACK);
if __debug__:
if mode.debugShadowVolume:
glColorMask(0,1,0,0)
## as far as I can see, there is no way for either
## the cap or the boot to change anything on this pass,
## so why bother rendering them?
if not mode.debugShadowNoCaps:
self._render_cap()
if not mode.debugShadowNoEdges:
self._render_edges()
if not mode.debugShadowNoBoots:
self._render_boot()
if mode.debugShadowSilouhette:
glColorMask(0,1,1,0)
self._debug_render_silouhette()
if __debug__:
glColorMask(0,0,0,0)
if not mode.debugShadowNoBackFaces:
glStencilOp(GL_KEEP,GL_KEEP,GL_DECR);
glCullFace(GL_FRONT);
if __debug__:
if mode.debugShadowVolume:
glColorMask(1,0,0,0)
if not mode.debugShadowNoCaps:
self._render_cap()
if not mode.debugShadowNoEdges:
self._render_edges()
if not mode.debugShadowNoBoots:
self._render_boot()
finally:
glFrontFace( GL_CCW )
if __debug__:
glColorMask(0,0,0,0);
glStencilMask( ~0 )
def _render_cap( self ):
"""Render the shadow-volume cap (forward-facing faces)"""
if self.forwardIndices is not None:
glVertexPointerd( self.edgeSet.points )
glEnableClientState(GL_VERTEX_ARRAY);
glDrawElementsui(
GL_TRIANGLES,
self.forwardIndices,
)
glDisableClientState(GL_VERTEX_ARRAY);
def _render_boot( self ):
"""Render the shadow-volume boot (backward-facing faces projected)"""
if self.sideType != GL_TRIANGLES and self.backwardPoints is not None:
# if triangles, the volume converges to a point, so there
# can be no back-facing polygons...
glVertexPointerd(self.backwardPoints )
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_TRIANGLES, 0, len(self.backwardPoints))
glDisableClientState(GL_VERTEX_ARRAY);
def _render_edges( self ):
"""Render the shadow-volume edges"""
# ignore mode while building...
if self.edges is not None:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointerd(self.edges )
assert self.sideType != 0, """%s _render_edges called before sideType determined"""%( self.__class__ )
glDrawArrays( self.sideType, 0, len(self.edges))
glDisableClientState(GL_VERTEX_ARRAY);
def _debug_render_silouhette( self ):
"""Debug render of silouhette as lines with current colour"""
### debug-rendering-mode
## draws edges as blue lines...
from OpenGL.GLUT import glutWireSphere
if self.sideType == GL_TRIANGLES:
step = 3
else:
step = 4
Bs = self.edges[0::step]
As = self.edges[1::step]
glBegin( GL_LINES )
for A,B in map(None, As, Bs):
glColor3f( 0,0,1.0)
glVertex4dv( A )
glColor3f( 0,1.0,.5)
glVertex4dv( B )
glEnd( )
glPushMatrix()
glTranslate( *self.sourceVector[:3])
glutWireSphere( .2,8,8)
glPopMatrix()
| lgpl-3.0 | -8,386,678,043,707,955,000 | 37.472892 | 114 | 0.538522 | false |
teslaji/homebase | venv/HomeBase/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
"""
Override the default bdist_rpm behavior to do the following:
1. Run egg_info to ensure the name and version are properly calculated.
2. Always run 'install' using --single-version-externally-managed to
disable eggs in RPM distributions.
3. Replace dash with underscore in the version numbers for better RPM
compatibility.
"""
def run(self):
# ensure distro name is up-to-date
self.run_command('egg_info')
orig.bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-', '_')
spec = orig.bdist_rpm._make_spec_file(self)
line23 = '%define version ' + version
line24 = '%define version ' + rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23, line24)
for line in spec
]
insert_loc = spec.index(line24) + 1
unmangled_version = "%define unmangled_version " + version
spec.insert(insert_loc, unmangled_version)
return spec
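    # Illustrative note: for a hypothetical distribution version '3.1-alpha'
    # the spec produced above contains
    #   %define version 3.1_alpha
    #   %define unmangled_version 3.1-alpha
    # and the Source0/setup lines refer to %{unmangled_version}.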
| gpl-3.0 | 3,653,923,585,467,684,400 | 34.069767 | 75 | 0.580902 | false |
JacerOmri/PokemonGo-Bot-Desktop | pywin/Lib/threading.py | 26 | 47132 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from collections import deque as _deque
from itertools import count as _count
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
return _RLock(*args, **kwargs)
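# Illustrative sketch (not part of this module's API): a reentrant lock may
# be acquired again by the thread that already owns it; it is only fully
# released once every acquire has been matched by a release.
def _example_rlock_reentrancy():
    lock = RLock()
    with lock:
        # Re-acquiring from the owning thread does not block.
        with lock:
            pass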
class _RLock(_Verbose):
"""A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
"""
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
def Condition(*args, **kwargs):
"""Factory function that returns a new condition variable object.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
return _Condition(*args, **kwargs)
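# Illustrative sketch (not part of this module's API): the usual consumer
# side of a condition variable; "items" is a hypothetical shared list that
# is guarded by "cv".
def _example_condition_consumer(cv, items):
    with cv:
        # Re-check the predicate in a loop; wait() may return before an item
        # is actually available (e.g. another consumer took it first).
        while not items:
            cv.wait()
        return items.pop(0)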
class _Condition(_Verbose):
"""Condition variables allow one or more threads to wait until they are
notified by another thread.
"""
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notifyAll() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 1/20th of a second (or the remaining timeout).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self.__waiters))
notify_all = notifyAll
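# --- Illustrative usage sketch (not part of the original module) ----------
# A small producer/consumer pattern built on the condition variable above.
# The shared list and the nested helpers are invented for illustration and
# are never called from this module; the point is the wait()/notify()
# protocol: waiters must re-check their predicate in a loop.
def _example_condition_usage():
    items = []
    cv = Condition()

    def consume_one():
        with cv:
            while not items:        # predicate may be false again on wake-up
                cv.wait()
            return items.pop(0)

    def produce_one(value):
        with cv:
            items.append(value)
            cv.notify()             # wake one thread blocked in wait()

    return produce_one, consume_one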
def Semaphore(*args, **kwargs):
"""A factory function that returns a new semaphore.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
"""Semaphores manage a counter representing the number of release() calls
minus the number of acquire() calls, plus an initial value. The acquire()
method blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
"""
rc = False
with self.__cond:
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self.__cond:
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
def __exit__(self, t, v, tb):
self.release()
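# --- Illustrative usage sketch (not part of the original module) ----------
# Using the semaphore above to cap how many threads may enter a section at
# once.  The pool size and the worker body are placeholders; the helper is
# documentation only and is not called anywhere in this module.
def _example_semaphore_usage(max_concurrent=3):
    gate = Semaphore(max_concurrent)

    def worker():
        with gate:                  # blocks once max_concurrent are inside
            pass                    # touch the limited resource here

    return worker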
def BoundedSemaphore(*args, **kwargs):
"""A factory function that returns a new bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""A bounded semaphore checks to make sure its current value doesn't exceed
its initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._Semaphore__cond:
if self._Semaphore__value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._Semaphore__value += 1
self._Semaphore__cond.notify()
def Event(*args, **kwargs):
"""A factory function that returns a new event.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true.
"""
return _Event(*args, **kwargs)
class _Event(_Verbose):
"""A factory function that returns a new event object. An event manages a
flag that can be set to true with the set() method and reset to false
with the clear() method. The wait() method blocks until the flag is true.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__(Lock())
def isSet(self):
'Return true if and only if the internal flag is true.'
return self.__flag
is_set = isSet
def set(self):
"""Set the internal flag to true.
All threads waiting for the flag to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
with self.__cond:
self.__flag = True
self.__cond.notify_all()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
with self.__cond:
self.__flag = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self.__cond:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
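# --- Illustrative usage sketch (not part of the original module) ----------
# One thread blocks on the event while another sets it.  Since wait()
# returns the flag, a False result means the optional timeout expired.  The
# helper below is illustrative only and is never called from this module.
def _example_event_usage():
    ready = Event()

    def waiter():
        return ready.wait(5.0)      # True once set(), False after 5s timeout

    def signaller():
        ready.set()                 # wakes every thread blocked in wait()

    return waiter, signaller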
# Helper to generate new thread names
_counter = _count().next
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
return template % _counter()
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys and _sys.stderr is not None:
print>>_sys.stderr, ("Exception in thread %s:\n%s" %
(self.name, _format_exc()))
elif self.__stderr is not None:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
# DummyThreads delete self.__block, but they have no waiters to
# notify anyway (join() is forbidden on them).
if not hasattr(self, '_Thread__block'):
return
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread (True) or not (False).
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
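# --- Illustrative usage sketch (not part of the original module) ----------
# The two usual ways of running code in a Thread: passing a target callable,
# or subclassing and overriding run().  Names are invented for illustration
# and the helper is never called from this module.
def _example_thread_usage():
    results = []

    def task(n):
        results.append(n * 2)

    t = Thread(target=task, args=(21,), name="worker-1")
    t.start()
    t.join()                        # block until the thread finishes

    class Doubler(Thread):
        def __init__(self, n):
            Thread.__init__(self)   # always call the base constructor first
            self.n = n

        def run(self):
            results.append(self.n * 2)

    d = Doubler(21)
    d.start()
    d.join()
    return results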
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
"""Factory function to create a Timer object.
Timers call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._Thread__stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = _deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
| mit | -2,244,524,759,658,020,000 | 34.652042 | 99 | 0.586141 | false |
kickstandproject/asterisk-testsuite-temporary | tests/channels/pjsip/publish/asterisk_event_mwi/mwi_sender.py | 5 | 1084 | #!/usr/bin/env python
'''
Copyright (C) 2014, Digium, Inc.
Mark Michelson <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
class MWISender(object):
'''Test module that updates MWI on the sending Asterisk server.
This reads test module configuration from test-config.yaml to determine
what MWIUpdate AMI commands to send in order to complete the test.
'''
def __init__(self, module_config, test_object):
self.config = module_config
test_object.register_ami_observer(self.ami_connect)
def ami_connect(self, ami):
'''Send configured AMI MWIUpdate commands'''
if ami.id == 1:
# ID 1 is the receiving Asterisk server.
return
for msg in self.config['messages']:
ami_msg = {
'Action': 'MWIUpdate',
'Mailbox': self.config['mailbox'],
'NewMessages': msg['new'],
'OldMessages': msg['old'],
}
ami.sendMessage(ami_msg)
| gpl-2.0 | 8,493,740,453,852,712,000 | 30.882353 | 75 | 0.610701 | false |
dreamhost/akanda-rug | akanda/rug/openstack/common/gettextutils.py | 2 | 1560 | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from akanda.rug.openstack.common.gettextutils import _
"""
import gettext
t = gettext.translation('openstack-common', 'locale', fallback=True)
def _(msg):
return t.ugettext(msg)
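# --- Illustrative usage note (not part of the original module) ------------
# Callers import _ from here and wrap user-facing strings with it so gettext
# can substitute a translation when a matching catalog exists (falling back
# to the original string otherwise).  Hypothetical example:
#
#     from akanda.rug.openstack.common.gettextutils import _
#     raise RuntimeError(_("router %s could not be found") % router_id)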
| apache-2.0 | 5,463,567,517,911,340,000 | 30.2 | 78 | 0.737179 | false |
ohanar/PolyBoRi | pyroot/polybori/memusage.py | 1 | 1523 | import os
from sys import maxint
_proc_status = '/proc/%d/status' % os.getpid()
#_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
# 'KB': 1024.0, 'MB': 1024.0*1024.0}
_scale = {'kB': 1, 'mB': 1024, 'gB': 1024 * 1024,
'KB': 1, 'MB': 1024, 'GB': 1024 * 1024}
def _VmB(VmKey):
'''Private.
'''
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except:
return float('nan') # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return float('nan') # invalid format?
# convert Vm value to bytes
# return float(v[1]) * _scale[v[2]]
return int(v[1]) * _scale[v[2]]
def memory(since=0):
'''Return memory usage in kilobytes.
'''
return _VmB('VmSize:') - since
def resident(since=0):
'''Return resident memory usage in kilobytes.
'''
return _VmB('VmRSS:') - since
def memorypeak(since=0):
'''Return memory usage peak in kilobytes.
'''
try:
return _VmB('VmPeak:') - since
except:
return float('nan') # old Linux?
def residentpeak(since=0):
'''Return resident memory usage peak in kilobytes.
'''
try:
return _VmB('VmHWM:') - since
except:
return float('nan') # old Linux?
def stacksize(since=0):
'''Return stack size in kilobytes.
'''
return _VmB('VmStk:') - since
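# --- Illustrative usage sketch (not part of the original module) ----------
# Typical pattern: snapshot usage before an operation and pass it back as
# 'since' to get the delta in kilobytes.  The workload below is a
# placeholder, and on non-Linux systems the helpers simply return NaN.
def _example_memusage():
    before = memory()
    data = [0] * 100000             # some allocation to measure
    grew_by = memory(before)        # kilobytes gained since 'before'
    return grew_by, memorypeak(), resident(), stacksize()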
| gpl-2.0 | 3,194,151,537,511,053,000 | 22.430769 | 54 | 0.544977 | false |
dnozay/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/localflavor/kw/forms.py | 310 | 1988 | """
Kuwait-specific Form helpers
"""
import re
from datetime import date
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField
from django.utils.translation import gettext as _
id_re = re.compile(r'^(?P<initial>\d{1})(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<checksum>\d{1})')
class KWCivilIDNumberField(Field):
"""
    Kuwaiti Civil ID numbers are 12 digits; the second to seventh digits
    represent the person's birthdate.
    Checks the following rules to determine the validity of the number:
    * The number consists of 12 digits.
* The birthdate of the person is a valid date.
* The calculated checksum equals to the last digit of the Civil ID.
"""
default_error_messages = {
'invalid': _('Enter a valid Kuwaiti Civil ID number'),
}
def has_valid_checksum(self, value):
weight = (2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
calculated_checksum = 0
for i in range(11):
calculated_checksum += int(value[i]) * weight[i]
remainder = calculated_checksum % 11
checkdigit = 11 - remainder
if checkdigit != int(value[11]):
return False
return True
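    # Worked example (illustrative comment, not part of the original module):
    # each of the first 11 digits is multiplied by the matching weight in
    # (2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2), the products are summed, and the
    # number is accepted only when 11 - (sum % 11) equals the 12th digit.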
def clean(self, value):
super(KWCivilIDNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not re.match(r'^\d{12}$', value):
raise ValidationError(self.error_messages['invalid'])
match = re.match(id_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
gd = match.groupdict()
try:
d = date(int(gd['yy']), int(gd['mm']), int(gd['dd']))
except ValueError:
raise ValidationError(self.error_messages['invalid'])
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['invalid'])
return value
| gpl-3.0 | 2,829,958,518,768,291,300 | 30.555556 | 111 | 0.617706 | false |
ecino/compassion-modules | mobile_app_connector/models/compassion_child_pictures.py | 2 | 1958 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from odoo import models, api
from ..mappings.compassion_child_pictures_mapping \
import MobileChildPicturesMapping
logger = logging.getLogger(__name__)
class CompassionChildPictures(models.Model):
""" A sponsored child """
_inherit = 'compassion.child.pictures'
@property
def image_url_compassion(self, type='fullshot'):
if type.lower() not in ['headshot', 'fullshot']:
raise ValueError("Expected argument 'type' to be 'headshot' or 'fullshot'")
base_url = self.env['ir.config_parameter'].get_param('web.external.url')
endpoint = base_url + "/web/image/compassion.child.pictures"
return "{}/{}/{}/{}_{}.jpg".format(endpoint,
self.id,
type,
self.date,
self.child_id.id)
@api.multi
def get_app_json(self, multi=False):
"""
Called by HUB when data is needed for a tile
:param multi: used to change the wrapper if needed
:return: dictionary with JSON data of the children
"""
if not self:
return {}
mapping = MobileChildPicturesMapping(self.env)
# wrapper = 'Images' if multi else 'Images'
if len(self) == 1:
data = [mapping.get_connect_data(self)]
else:
data = []
for child in self:
data.append(mapping.get_connect_data(child))
return data
| agpl-3.0 | -8,698,730,183,214,458,000 | 34.6 | 87 | 0.515322 | false |
dhp-denero/LibrERP | massive_category_change/wizard/wizard.py | 2 | 1662 | # -*- coding: utf-8 -*-
# Copyright 2012 Francesco OpenCode Apruzzese <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from osv import fields,osv
from tools.translate import _
class wzd_massive_category_change(osv.osv_memory):
_name = "wzd.massive_category_change"
_columns = {
'name' : fields.many2one('product.category', 'Category'),
}
def change(self, cr, uid, ids, context={}):
wzd = self.browse(cr, uid, ids[0], context)
res={}
categ = wzd.name
res['categ_id'] = categ.id
if categ.provision_type:
res['type'] = categ.provision_type
if categ.procure_method:
res['procure_method'] = categ.procure_method
if categ.supply_method:
res['supply_method'] = categ.supply_method
self.pool.get('product.product').write(cr, uid, context['active_ids'], res)
return {'type': 'ir.actions.act_window_close'}
wzd_massive_category_change()
| agpl-3.0 | -3,864,974,103,749,511,700 | 34.361702 | 81 | 0.679302 | false |
kustodian/ansible | lib/ansible/modules/network/avi/avi_cloudconnectoruser.py | 28 | 4856 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
gcp_credentials:
description:
- Credentials for google cloud platform.
- Field introduced in 18.2.1.
version_added: "2.9"
name:
description:
- Name of the object.
required: true
oci_credentials:
description:
- Credentials for oracle cloud infrastructure.
- Field introduced in 18.2.1,18.1.3.
version_added: "2.9"
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
tencent_credentials:
description:
- Credentials for tencent cloud.
- Field introduced in 18.2.3.
version_added: "2.9"
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
azure_serviceprincipal=dict(type='dict',),
azure_userpass=dict(type='dict',),
gcp_credentials=dict(type='dict',),
name=dict(type='str', required=True),
oci_credentials=dict(type='dict',),
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
tencent_credentials=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
| gpl-3.0 | -6,427,750,039,231,484,000 | 31.15894 | 92 | 0.598641 | false |
sbkolate/sap_frappe_v6 | frappe/custom/doctype/custom_field/custom_field.py | 3 | 4659 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from frappe import _
import json
from frappe.model.document import Document
class CustomField(Document):
def autoname(self):
self.set_fieldname()
self.name = self.dt + "-" + self.fieldname
def set_fieldname(self):
if not self.fieldname:
if not self.label:
frappe.throw(_("Label is mandatory"))
			# keep only letters, digits and the underscore in the fieldname
			self.fieldname = filter(lambda x: x.isdigit() or x.isalpha() or x == '_',
				cstr(self.label).lower().replace(' ', '_'))
# fieldnames should be lowercase
self.fieldname = self.fieldname.lower()
def validate(self):
if not self.idx:
self.idx = len(frappe.get_meta(self.dt).get("fields")) + 1
if not self.fieldname:
frappe.throw(_("Fieldname not set for Custom Field"))
def on_update(self):
frappe.clear_cache(doctype=self.dt)
if not self.flags.ignore_validate:
# validate field
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.dt)
# create property setter to emulate insert after
if self.insert_after:
self.set_property_setter_for_idx()
# update the schema
# if not frappe.flags.in_test:
from frappe.model.db_schema import updatedb
updatedb(self.dt)
def on_trash(self):
# delete property setter entries
frappe.db.sql("""\
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s
AND field_name = %s""",
(self.dt, self.fieldname))
# Remove custom field from _idx
existing_idx_property_setter = frappe.db.get_value("Property Setter",
{"doc_type": self.dt, "property": "_idx"}, ["name", "value"], as_dict=1)
if existing_idx_property_setter:
_idx = json.loads(existing_idx_property_setter.value)
if self.fieldname in _idx:
_idx.remove(self.fieldname)
frappe.db.set_value("Property Setter", existing_idx_property_setter.name,
"value", json.dumps(_idx))
frappe.clear_cache(doctype=self.dt)
def set_property_setter_for_idx(self):
dt_meta = frappe.get_meta(self.dt)
self.validate_insert_after(dt_meta)
_idx = []
existing_property_setter = frappe.db.get_value("Property Setter",
{"doc_type": self.dt, "property": "_idx"}, ["name", "value"], as_dict=1)
# if no existsing property setter, build based on meta
if not existing_property_setter:
for df in sorted(dt_meta.get("fields"), key=lambda x: x.idx):
if df.fieldname != self.fieldname:
_idx.append(df.fieldname)
else:
_idx = json.loads(existing_property_setter.value)
# Delete existing property setter if field is not there
if self.fieldname not in _idx:
frappe.delete_doc("Property Setter", existing_property_setter.name)
existing_property_setter = None
		# Create new property setter if order changed
if _idx and not existing_property_setter:
field_idx = (_idx.index(self.insert_after) + 1) if (self.insert_after in _idx) else len(_idx)
_idx.insert(field_idx, self.fieldname)
frappe.make_property_setter({
"doctype":self.dt,
"doctype_or_field": "DocType",
"property": "_idx",
"value": json.dumps(_idx),
"property_type": "Text"
}, validate_fields_for_doctype=False)
def validate_insert_after(self, meta):
if not meta.get_field(self.insert_after):
frappe.throw(_("Insert After field '{0}' mentioned in Custom Field '{1}', does not exist")
.format(self.insert_after, self.label), frappe.DoesNotExistError)
if self.fieldname == self.insert_after:
frappe.throw(_("Insert After cannot be set as {0}").format(meta.get_label(self.insert_after)))
@frappe.whitelist()
def get_fields_label(doctype=None):
return [{"value": df.fieldname or "", "label": _(df.label or "")} for df in frappe.get_meta(doctype).get("fields")]
def create_custom_field_if_values_exist(doctype, df):
df = frappe._dict(df)
if df.fieldname in frappe.db.get_table_columns(doctype) and \
frappe.db.sql("""select count(*) from `tab{doctype}`
where ifnull({fieldname},'')!=''""".format(doctype=doctype, fieldname=df.fieldname))[0][0]:
create_custom_field(doctype, df)
def create_custom_field(doctype, df):
if not frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df.fieldname}):
frappe.get_doc({
"doctype":"Custom Field",
"dt": doctype,
"permlevel": df.get("permlevel") or 0,
"label": df.get("label"),
"fieldname": df.get("fieldname"),
"fieldtype": df.get("fieldtype"),
"options": df.get("options"),
"insert_after": df.get("insert_after"),
"print_hide": df.get("print_hide")
}).insert()
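# --- Illustrative usage sketch (not part of the original module) ----------
# How a caller might add a custom field through the helper above.  The
# doctype, label and fieldname are invented for illustration, and a real
# call requires an initialized frappe site/database context.
def _example_create_custom_field():
	create_custom_field("Sales Invoice", frappe._dict({
		"label": "Delivery Notes",
		"fieldname": "delivery_notes",
		"fieldtype": "Small Text",
		"insert_after": "customer",
	}))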
| mit | -1,475,740,865,231,869,000 | 32.517986 | 116 | 0.687916 | false |
firmlyjin/brython | www/tests/unittests/test/test_wsgiref.py | 25 | 21880 | from __future__ import nested_scopes # Backward compat for 2.1
from unittest import TestCase
from wsgiref.util import setup_testing_defaults
from wsgiref.headers import Headers
from wsgiref.handlers import BaseHandler, BaseCGIHandler
from wsgiref import util
from wsgiref.validate import validator
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app
from wsgiref.simple_server import make_server
from io import StringIO, BytesIO, BufferedReader
from socketserver import BaseServer
from platform import python_implementation
import os
import re
import sys
from test import support
class MockServer(WSGIServer):
"""Non-socket HTTP server"""
def __init__(self, server_address, RequestHandlerClass):
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.server_bind()
def server_bind(self):
host, port = self.server_address
self.server_name = host
self.server_port = port
self.setup_environ()
class MockHandler(WSGIRequestHandler):
"""Non-socket HTTP handler"""
def setup(self):
self.connection = self.request
self.rfile, self.wfile = self.connection
def finish(self):
pass
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
('Date','Mon, 05 Jun 2006 18:49:54 GMT')
])
return [b"Hello, world!"]
def run_amock(app=hello_app, data=b"GET / HTTP/1.0\n\n"):
server = make_server("", 80, app, MockServer, MockHandler)
inp = BufferedReader(BytesIO(data))
out = BytesIO()
olderr = sys.stderr
err = sys.stderr = StringIO()
try:
server.finish_request((inp, out), ("127.0.0.1",8888))
finally:
sys.stderr = olderr
return out.getvalue(), err.getvalue()
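# --- Illustrative usage sketch (not part of the original test module) -----
# run_amock() pushes one request through the mock server and returns the raw
# HTTP response bytes together with anything logged to stderr.  The helper
# below only documents that shape and is not used by the tests.
def _example_run_amock():
    out, err = run_amock(app=hello_app, data=b"GET / HTTP/1.0\r\n\r\n")
    return out.startswith(b"HTTP/1.0 200 OK"), err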
def compare_generic_iter(make_it,match):
"""Utility to compare a generic 2.1/2.2+ iterator with an iterable
If running under Python 2.2+, this tests the iterator using iter()/next(),
as well as __getitem__. 'make_it' must be a function returning a fresh
iterator to be tested (since this may test the iterator twice)."""
it = make_it()
n = 0
for item in match:
if not it[n]==item: raise AssertionError
n+=1
try:
it[n]
except IndexError:
pass
else:
raise AssertionError("Too many items from __getitem__",it)
try:
iter, StopIteration
except NameError:
pass
else:
# Only test iter mode under 2.2+
it = make_it()
if not iter(it) is it: raise AssertionError
for item in match:
if not next(it) == item: raise AssertionError
try:
next(it)
except StopIteration:
pass
else:
raise AssertionError("Too many items from .__next__()", it)
class IntegrationTests(TestCase):
def check_hello(self, out, has_length=True):
pyver = (python_implementation() + "/" +
sys.version.split()[0])
self.assertEqual(out,
("HTTP/1.0 200 OK\r\n"
"Server: WSGIServer/0.2 " + pyver +"\r\n"
"Content-Type: text/plain\r\n"
"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" +
(has_length and "Content-Length: 13\r\n" or "") +
"\r\n"
"Hello, world!").encode("iso-8859-1")
)
def test_plain_hello(self):
out, err = run_amock()
self.check_hello(out)
def test_validated_hello(self):
out, err = run_amock(validator(hello_app))
# the middleware doesn't support len(), so content-length isn't there
self.check_hello(out, has_length=False)
def test_simple_validation_error(self):
def bad_app(environ,start_response):
start_response("200 OK", ('Content-Type','text/plain'))
return ["Hello, world!"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
b"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2],
"AssertionError: Headers (('Content-Type', 'text/plain')) must"
" be of type list: <class 'tuple'>"
)
def test_wsgi_input(self):
def bad_app(e,s):
e["wsgi.input"].read()
s("200 OK", [("Content-Type", "text/plain; charset=utf-8")])
return [b"data"]
out, err = run_amock(validator(bad_app))
self.assertTrue(out.endswith(
b"A server error occurred. Please contact the administrator."
))
self.assertEqual(
err.splitlines()[-2], "AssertionError"
)
def test_bytes_validation(self):
def app(e, s):
s("200 OK", [
("Content-Type", "text/plain; charset=utf-8"),
("Date", "Wed, 24 Dec 2008 13:29:32 GMT"),
])
return [b"data"]
out, err = run_amock(validator(app))
self.assertTrue(err.endswith('"GET / HTTP/1.0" 200 4\n'))
ver = sys.version.split()[0].encode('ascii')
py = python_implementation().encode('ascii')
pyver = py + b"/" + ver
self.assertEqual(
b"HTTP/1.0 200 OK\r\n"
b"Server: WSGIServer/0.2 "+ pyver + b"\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Date: Wed, 24 Dec 2008 13:29:32 GMT\r\n"
b"\r\n"
b"data",
out)
class UtilityTests(TestCase):
def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
env = {'SCRIPT_NAME':sn_in,'PATH_INFO':pi_in}
util.setup_testing_defaults(env)
self.assertEqual(util.shift_path_info(env),part)
self.assertEqual(env['PATH_INFO'],pi_out)
self.assertEqual(env['SCRIPT_NAME'],sn_out)
return env
def checkDefault(self, key, value, alt=None):
# Check defaulting when empty
env = {}
util.setup_testing_defaults(env)
if isinstance(value, StringIO):
self.assertIsInstance(env[key], StringIO)
elif isinstance(value,BytesIO):
self.assertIsInstance(env[key],BytesIO)
else:
self.assertEqual(env[key], value)
# Check existing value
env = {key:alt}
util.setup_testing_defaults(env)
self.assertTrue(env[key] is alt)
def checkCrossDefault(self,key,value,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(kw[key],value)
def checkAppURI(self,uri,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.application_uri(kw),uri)
def checkReqURI(self,uri,query=1,**kw):
util.setup_testing_defaults(kw)
self.assertEqual(util.request_uri(kw,query),uri)
def checkFW(self,text,size,match):
def make_it(text=text,size=size):
return util.FileWrapper(StringIO(text),size)
compare_generic_iter(make_it,match)
it = make_it()
self.assertFalse(it.filelike.closed)
for item in it:
pass
self.assertFalse(it.filelike.closed)
it.close()
self.assertTrue(it.filelike.closed)
def testSimpleShifts(self):
self.checkShift('','/', '', '/', '')
self.checkShift('','/x', 'x', '/x', '')
self.checkShift('/','', None, '/', '')
self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
self.checkShift('/a','/x/', 'x', '/a/x', '/')
def testNormalizedShifts(self):
self.checkShift('/a/b', '/../y', '..', '/a', '/y')
self.checkShift('', '/../y', '..', '', '/y')
self.checkShift('/a/b', '//y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '//y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '/./y', 'y', '/a/b/y', '')
self.checkShift('/a/b', '/./y/', 'y', '/a/b/y', '/')
self.checkShift('/a/b', '///./..//y/.//', '..', '/a', '/y/')
self.checkShift('/a/b', '///', '', '/a/b/', '')
self.checkShift('/a/b', '/.//', '', '/a/b/', '')
self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
self.checkShift('/a/b', '/.', None, '/a/b', '')
def testDefaults(self):
for key, value in [
('SERVER_NAME','127.0.0.1'),
('SERVER_PORT', '80'),
('SERVER_PROTOCOL','HTTP/1.0'),
('HTTP_HOST','127.0.0.1'),
('REQUEST_METHOD','GET'),
('SCRIPT_NAME',''),
('PATH_INFO','/'),
('wsgi.version', (1,0)),
('wsgi.run_once', 0),
('wsgi.multithread', 0),
('wsgi.multiprocess', 0),
('wsgi.input', BytesIO()),
('wsgi.errors', StringIO()),
('wsgi.url_scheme','http'),
]:
self.checkDefault(key,value)
def testCrossDefaults(self):
self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="1")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="yes")
self.checkCrossDefault('wsgi.url_scheme',"http",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
def testGuessScheme(self):
self.assertEqual(util.guess_scheme({}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"on"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
def testAppURIs(self):
self.checkAppURI("http://127.0.0.1/")
self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkAppURI("http://127.0.0.1/sp%C3%A4m", SCRIPT_NAME="/späm")
self.checkAppURI("http://spam.example.com:2071/",
HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
self.checkAppURI("http://spam.example.com/",
SERVER_NAME="spam.example.com")
self.checkAppURI("http://127.0.0.1/",
HTTP_HOST="127.0.0.1", SERVER_NAME="spam.example.com")
self.checkAppURI("https://127.0.0.1/", HTTPS="on")
self.checkAppURI("http://127.0.0.1:8000/", SERVER_PORT="8000",
HTTP_HOST=None)
def testReqURIs(self):
self.checkReqURI("http://127.0.0.1/")
self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
self.checkReqURI("http://127.0.0.1/sp%C3%A4m", SCRIPT_NAME="/späm")
self.checkReqURI("http://127.0.0.1/spammity/spam",
SCRIPT_NAME="/spammity", PATH_INFO="/spam")
self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
def testFileWrapper(self):
self.checkFW("xyz"*50, 120, ["xyz"*40,"xyz"*10])
def testHopByHop(self):
for hop in (
"Connection Keep-Alive Proxy-Authenticate Proxy-Authorization "
"TE Trailers Transfer-Encoding Upgrade"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertTrue(util.is_hop_by_hop(alt))
# Not comprehensive, just a few random header names
for hop in (
"Accept Cache-Control Date Pragma Trailer Via Warning"
).split():
for alt in hop, hop.title(), hop.upper(), hop.lower():
self.assertFalse(util.is_hop_by_hop(alt))
class HeaderTests(TestCase):
def testMappingInterface(self):
test = [('x','y')]
self.assertEqual(len(Headers([])),0)
self.assertEqual(len(Headers(test[:])),1)
self.assertEqual(Headers(test[:]).keys(), ['x'])
self.assertEqual(Headers(test[:]).values(), ['y'])
self.assertEqual(Headers(test[:]).items(), test)
self.assertFalse(Headers(test).items() is test) # must be copy!
h=Headers([])
del h['foo'] # should not raise an error
h['Foo'] = 'bar'
for m in h.__contains__, h.get, h.get_all, h.__getitem__:
self.assertTrue(m('foo'))
self.assertTrue(m('Foo'))
self.assertTrue(m('FOO'))
self.assertFalse(m('bar'))
self.assertEqual(h['foo'],'bar')
h['foo'] = 'baz'
self.assertEqual(h['FOO'],'baz')
self.assertEqual(h.get_all('foo'),['baz'])
self.assertEqual(h.get("foo","whee"), "baz")
self.assertEqual(h.get("zoo","whee"), "whee")
self.assertEqual(h.setdefault("foo","whee"), "baz")
self.assertEqual(h.setdefault("zoo","whee"), "whee")
self.assertEqual(h["foo"],"baz")
self.assertEqual(h["zoo"],"whee")
def testRequireList(self):
self.assertRaises(TypeError, Headers, "foo")
def testExtras(self):
h = Headers([])
self.assertEqual(str(h),'\r\n')
h.add_header('foo','bar',baz="spam")
self.assertEqual(h['foo'], 'bar; baz="spam"')
self.assertEqual(str(h),'foo: bar; baz="spam"\r\n\r\n')
h.add_header('Foo','bar',cheese=None)
self.assertEqual(h.get_all('foo'),
['bar; baz="spam"', 'bar; cheese'])
self.assertEqual(str(h),
'foo: bar; baz="spam"\r\n'
'Foo: bar; cheese\r\n'
'\r\n'
)
class ErrorHandler(BaseCGIHandler):
"""Simple handler subclass for testing BaseHandler"""
# BaseHandler records the OS environment at import time, but envvars
# might have been changed later by other tests, which trips up
# HandlerTests.testEnviron().
os_environ = dict(os.environ.items())
def __init__(self,**kw):
setup_testing_defaults(kw)
BaseCGIHandler.__init__(
self, BytesIO(), BytesIO(), StringIO(), kw,
multithread=True, multiprocess=True
)
class TestHandler(ErrorHandler):
"""Simple handler subclass for testing BaseHandler, w/error passthru"""
def handle_error(self):
raise # for testing, we want to see what's happening
class HandlerTests(TestCase):
def checkEnvironAttrs(self, handler):
env = handler.environ
for attr in [
'version','multithread','multiprocess','run_once','file_wrapper'
]:
if attr=='file_wrapper' and handler.wsgi_file_wrapper is None:
continue
self.assertEqual(getattr(handler,'wsgi_'+attr),env['wsgi.'+attr])
def checkOSEnviron(self,handler):
empty = {}; setup_testing_defaults(empty)
env = handler.environ
from os import environ
for k,v in environ.items():
if k not in empty:
self.assertEqual(env[k],v)
for k,v in empty.items():
self.assertIn(k, env)
def testEnviron(self):
h = TestHandler(X="Y")
h.setup_environ()
self.checkEnvironAttrs(h)
self.checkOSEnviron(h)
self.assertEqual(h.environ["X"],"Y")
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
h.setup_environ()
for key in 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors':
self.assertIn(key, h.environ)
def testScheme(self):
h=TestHandler(HTTPS="on"); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'https')
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
def testAbstractMethods(self):
h = BaseHandler()
for name in [
'_flush','get_stdin','get_stderr','add_cgi_vars'
]:
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
def trivial_app1(e,s):
s('200 OK',[])
return [e['wsgi.url_scheme'].encode('iso-8859-1')]
def trivial_app2(e,s):
s('200 OK',[])(e['wsgi.url_scheme'].encode('iso-8859-1'))
return []
def trivial_app3(e,s):
s('200 OK',[])
return ['\u0442\u0435\u0441\u0442'.encode("utf-8")]
def trivial_app4(e,s):
# Simulate a response to a HEAD request
s('200 OK',[('Content-Length', '12345')])
return []
h = TestHandler()
h.run(trivial_app1)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 4\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app2)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app3)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 8\r\n'
b'\r\n'
b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82')
h = TestHandler()
h.run(trivial_app4)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 12345\r\n'
b'\r\n')
def testBasicErrorOutput(self):
def non_error_app(e,s):
s('200 OK',[])
return []
def error_app(e,s):
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(non_error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 0\r\n"
"\r\n").encode("iso-8859-1"))
self.assertEqual(h.stderr.getvalue(),"")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: %d\r\n"
"\r\n" % (h.error_status,len(h.error_body))).encode('iso-8859-1')
+ h.error_body)
self.assertIn("AssertionError", h.stderr.getvalue())
def testErrorAfterOutput(self):
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"\r\n".encode("iso-8859-1")+MSG))
self.assertIn("AssertionError", h.stderr.getvalue())
def testHeaderFormats(self):
def non_error_app(e,s):
s('200 OK',[])
return []
stdpat = (
r"HTTP/%s 200 OK\r\n"
r"Date: \w{3}, [ 0123]\d \w{3} \d{4} \d\d:\d\d:\d\d GMT\r\n"
r"%s" r"Content-Length: 0\r\n" r"\r\n"
)
shortpat = (
"Status: 200 OK\r\n" "Content-Length: 0\r\n" "\r\n"
).encode("iso-8859-1")
for ssw in "FooBar/1.0", None:
sw = ssw and "Server: %s\r\n" % ssw or ""
for version in "1.0", "1.1":
for proto in "HTTP/0.9", "HTTP/1.0", "HTTP/1.1":
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = False
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
self.assertEqual(shortpat,h.stdout.getvalue())
h = TestHandler(SERVER_PROTOCOL=proto)
h.origin_server = True
h.http_version = version
h.server_software = ssw
h.run(non_error_app)
if proto=="HTTP/0.9":
self.assertEqual(h.stdout.getvalue(),b"")
else:
self.assertTrue(
re.match((stdpat%(version,sw)).encode("iso-8859-1"),
h.stdout.getvalue()),
((stdpat%(version,sw)).encode("iso-8859-1"),
h.stdout.getvalue())
)
def testBytesData(self):
def app(e, s):
s("200 OK", [
("Content-Type", "text/plain; charset=utf-8"),
])
return [b"data"]
h = TestHandler()
h.run(app)
self.assertEqual(b"Status: 200 OK\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"data",
h.stdout.getvalue())
def testCloseOnError(self):
side_effects = {'close_called': False}
MSG = b"Some output has been sent"
def error_app(e,s):
s("200 OK",[])(MSG)
class CrashyIterable(object):
def __iter__(self):
while True:
yield b'blah'
raise AssertionError("This should be caught by handler")
def close(self):
side_effects['close_called'] = True
return CrashyIterable()
h = ErrorHandler()
h.run(error_app)
self.assertEqual(side_effects['close_called'], True)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
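# Quick illustration (not part of the original test suite) of the
# wsgiref.util.shift_path_info() behaviour exercised by testSimpleShifts and
# testNormalizedShifts above: each call moves one path segment from PATH_INFO
# onto SCRIPT_NAME and returns it.
def _shift_path_info_demo():
    environ = {'SCRIPT_NAME': '/a', 'PATH_INFO': '/x/y'}
    assert util.shift_path_info(environ) == 'x'
    assert environ['SCRIPT_NAME'] == '/a/x'
    assert environ['PATH_INFO'] == '/y'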
| bsd-3-clause | -107,042,960,670,653,100 | 33.671949 | 80 | 0.543605 | false |
ammaraskar/gunicorn | gunicorn/pidfile.py | 31 | 2291 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import errno
import os
import tempfile
class Pidfile(object):
"""\
Manage a PID file. If a specific name is provided
it and '"%s.oldpid" % name' will be used. Otherwise
we create a temp file using os.mkstemp.
"""
def __init__(self, fname):
self.fname = fname
self.pid = None
def create(self, pid):
oldpid = self.validate()
if oldpid:
if oldpid == os.getpid():
return
raise RuntimeError("Already running on PID %s " \
"(or pid file '%s' is stale)" % (os.getpid(), self.fname))
self.pid = pid
# Write pidfile
fdir = os.path.dirname(self.fname)
if fdir and not os.path.isdir(fdir):
raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
fd, fname = tempfile.mkstemp(dir=fdir)
os.write(fd, ("%s\n" % self.pid).encode('utf-8'))
if self.fname:
os.rename(fname, self.fname)
else:
self.fname = fname
os.close(fd)
# set permissions to -rw-r--r--
os.chmod(self.fname, 420)
def rename(self, path):
self.unlink()
self.fname = path
self.create(self.pid)
def unlink(self):
""" delete pidfile"""
try:
with open(self.fname, "r") as f:
pid1 = int(f.read() or 0)
if pid1 == self.pid:
os.unlink(self.fname)
except:
pass
def validate(self):
""" Validate pidfile and make it stale if needed"""
if not self.fname:
return
try:
with open(self.fname, "r") as f:
wpid = int(f.read() or 0)
if wpid <= 0:
return
try:
os.kill(wpid, 0)
return wpid
except OSError as e:
if e.args[0] == errno.ESRCH:
return
raise
except IOError as e:
if e.args[0] == errno.ENOENT:
return
raise
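# Illustrative usage sketch (not part of gunicorn): an arbiter-style process
# would normally create the pidfile on startup and unlink it on shutdown.
# The path used below is an assumption made for this example only.
if __name__ == "__main__":
    pidfile = Pidfile("/tmp/example-gunicorn.pid")
    pidfile.create(os.getpid())  # raises RuntimeError if a live process owns the file
    try:
        pass  # the master process loop would run here
    finally:
        pidfile.unlink()  # deletes the file only if it still contains our pid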
| mit | -8,384,642,754,495,060,000 | 25.639535 | 80 | 0.494544 | false |
tweemeterjop/thug | thug/ActiveX/modules/ZenturiProgramCheckerAttack.py | 1 | 1826 |
import logging
log = logging.getLogger("Thug")
def DownloadFile(self, *arg):
log.ThugLogging.add_behavior_warn('[ZenturiProgramChecker ActiveX] Attack in DownloadFile function')
log.ThugLogging.add_behavior_warn('[ZenturiProgramChecker ActiveX] Downloading from %s' % (arg[0], ))
log.ThugLogging.add_behavior_warn("[ZenturiProgramChecker ActiveX] Saving downloaded file as: %s" % (arg[1], ))
log.ThugLogging.log_exploit_event(self._window.url,
"ZenturiProgramChecker ActiveX",
"DownloadFile function",
forward = False,
data = {
"url" : arg[0],
"filename": arg[1]
}
)
try:
self._window._navigator.fetch(arg[0], redirect_type = "ZenturiProgramChecker Exploit")
except: # pylint:disable=bare-except
log.ThugLogging.add_behavior_warn('[ZenturiProgramChecker ActiveX] Fetch failed')
def DebugMsgLog(self, *arg):
log.ThugLogging.add_behavior_warn('[ZenturiProgramChecker ActiveX] Attack in DebugMsgLog function')
log.ThugLogging.log_exploit_event(self._window.url,
"ZenturiProgramChecker ActiveX",
"Attack in DebugMsgLog function")
def NavigateUrl(self, *arg):
log.ThugLogging.add_behavior_warn('[ZenturiProgramChecker ActiveX] Attack in NavigateUrl function')
log.ThugLogging.log_exploit_event(self._window.url,
"ZenturiProgramChecker ActiveX",
"Attack in NavigateUrl function")
| gpl-2.0 | -1,450,692,979,710,471,200 | 47.052632 | 115 | 0.548193 | false |
herow/planning_qgis | python/plugins/processing/algs/qgis/PointsToPaths.py | 8 | 5638 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointsToPaths.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from datetime import datetime
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsFeature, QgsFields, QgsField, QgsGeometry, QgsDistanceArea
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.core.outputs import OutputDirectory
from processing.tools import dataobjects, vector
class PointsToPaths(GeoAlgorithm):
VECTOR = 'VECTOR'
GROUP_FIELD = 'GROUP_FIELD'
ORDER_FIELD = 'ORDER_FIELD'
DATE_FORMAT = 'DATE_FORMAT'
#GAP_PERIOD = 'GAP_PERIOD'
OUTPUT_LINES = 'OUTPUT_LINES'
OUTPUT_TEXT = 'OUTPUT_TEXT'
def defineCharacteristics(self):
self.name = 'Points to path'
self.group = 'Vector creation tools'
self.addParameter(ParameterVector(self.VECTOR,
self.tr('Input point layer'), [ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterTableField(self.GROUP_FIELD,
self.tr('Group field'), self.VECTOR))
self.addParameter(ParameterTableField(self.ORDER_FIELD,
self.tr('Order field'), self.VECTOR))
self.addParameter(ParameterString(self.DATE_FORMAT,
self.tr('Date format (if order field is DateTime)'), '', optional=True))
#self.addParameter(ParameterNumber(
# self.GAP_PERIOD,
# 'Gap period (if order field is DateTime)', 0, 60, 0))
self.addOutput(OutputVector(self.OUTPUT_LINES, self.tr('Paths')))
self.addOutput(OutputDirectory(self.OUTPUT_TEXT, self.tr('Directory')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.VECTOR))
groupField = self.getParameterValue(self.GROUP_FIELD)
orderField = self.getParameterValue(self.ORDER_FIELD)
dateFormat = unicode(self.getParameterValue(self.DATE_FORMAT))
#gap = int(self.getParameterValue(self.GAP_PERIOD))
dirName = self.getOutputValue(self.OUTPUT_TEXT)
fields = QgsFields()
fields.append(QgsField('group', QVariant.String, '', 254, 0))
fields.append(QgsField('begin', QVariant.String, '', 254, 0))
fields.append(QgsField('end', QVariant.String, '', 254, 0))
writer = self.getOutputFromName(self.OUTPUT_LINES).getVectorWriter(
fields, QGis.WKBLineString, layer.dataProvider().crs())
points = dict()
features = vector.features(layer)
total = 100.0 / len(features)
for count, f in enumerate(features):
point = f.geometry().asPoint()
group = f[groupField]
order = f[orderField]
if dateFormat != '':
order = datetime.strptime(unicode(order), dateFormat)
if group in points:
points[group].append((order, point))
else:
points[group] = [(order, point)]
progress.setPercentage(int(count * total))
progress.setPercentage(0)
da = QgsDistanceArea()
count = 0
total = 100.0 / len(points)
for group, vertices in points.iteritems():
vertices.sort()
f = QgsFeature()
f.initAttributes(len(fields))
f.setFields(fields)
f['group'] = group
f['begin'] = vertices[0][0]
f['end'] = vertices[-1][0]
fileName = os.path.join(dirName, '%s.txt' % group)
fl = open(fileName, 'w')
fl.write('angle=Azimuth\n')
fl.write('heading=Coordinate_System\n')
fl.write('dist_units=Default\n')
line = []
i = 0
for node in vertices:
line.append(node[1])
if i == 0:
fl.write('startAt=%f;%f;90\n' % (node[1].x(), node[1].y()))
fl.write('survey=Polygonal\n')
fl.write('[data]\n')
else:
angle = line[i-1].azimuth(line[i])
distance = da.measureLine(line[i-1], line[i])
fl.write('%f;%f;90\n' % (angle, distance))
i += 1
f.setGeometry(QgsGeometry.fromPolyline(line))
writer.addFeature(f)
count += 1
progress.setPercentage(int(count * total))
del writer
fl.close()
| gpl-2.0 | 5,652,513,696,830,232,000 | 37.616438 | 89 | 0.554984 | false |
jazzij/powertoken | background/database.py | 1 | 6870 | #imports for database engine
import os
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from sqlalchemy.orm import scoped_session, sessionmaker
#imports for models
from datetime import datetime, timedelta
from sqlalchemy import MetaData, Column, ForeignKey, Integer, String, DateTime, Float, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import event
#from sqlalchemy.event import listens
#CONSTANTS
DB_REL_PATH = "sqlite:///" + "data/pt_data.db"
DB_PATH = "sqlite:///" + os.environ.get("DB_PATH")
TALLY="tally"
CHARGE="charge"
WEIGHT="weight"
PLAN="plan"
Base = declarative_base()
class Admin(Base):
"""
Represents a PowerToken administrator, capable of viewing the admin
dashboard and supervising user progress.
"""
__tablename__ = "admin"
id = Column(Integer, primary_key=True)
username = Column(String(32), nullable=False, index=True, unique=True)
email = Column(String(64), nullable=False, index=True, unique=True)
password_hash = Column(String(128))
def __repr__(self):
return "<Admin {}>".format(self.username)
def __init__(self, username):
self.username = username
class User(Base):
"""
Represents a PowerToken user who is in recovery.
"""
__tablename__ = "user"
__table_args__ = {'extend_existing':True}
id = Column(Integer, primary_key=True)
username = Column(String(32), nullable=False, index=True, unique=True)
registered_on = Column(DateTime, index=True, default=datetime.now())
metaphor = Column(String(16), default=TALLY)
wc_id = Column(Integer, unique=True)
wc_token = Column(String(128))
fb_token = Column(String(256))
#logs = relationship("Log", backref="user", lazy="dynamic")
activities = relationship("Activity", backref="user", lazy="dynamic")
errors = relationship("Error", backref="user", lazy="dynamic")
days = relationship("Day", backref="user", lazy="dynamic")
def thisday(self):
d = datetime.now()
today = datetime(d.year, d.month, d.day)
return self.days.filter(Day.date == today).first()
def yesterday(self):
d = datetime.now().date()
yester = d - timedelta(days=1)
ydt = datetime.combine(yester, datetime.min.time())
return self.days.filter(Day.date == ydt).first()
def __repr__(self):
return "<User {}>".format(self.username)
def __init__(self, username):
self.username = username
class Activity(Base):
"""
Represents a WEconnect activity.
"""
__tablename__ = "activity"
id = Column(Integer, primary_key=True)
wc_act_id = Column(Integer, index=True, unique=True)
name = Column(String(256))
expiration = Column(DateTime, index=True)
weight = Column(Integer, default=3)
user_id = Column(Integer, ForeignKey("user.wc_id"))
events = relationship("Event", backref="activity", lazy="dynamic")
def __repr__(self):
return "<Activity '{}'>".format(self.name)
class Error(Base):
"""
Represents an error that occurred somewhere in the application(s).
"""
__tablename__ = "error"
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime, default=datetime.now())
summary = Column(String(64))
origin = Column(String(256))
message = Column(String(256))
traceback = Column(String(1048))
user_id = Column(Integer, ForeignKey("user.id"))
def __repr__(self):
return "<Error '{}', '{}'>".format(self.summary, self.message)
class Day(Base):
"""
Represents a day of progress (which activities are completed, etc).
"""
__tablename__ = "day"
__table_args__ = {'extend_existing':True}
id = Column(Integer, primary_key=True)
date = Column(DateTime, index=True) # Time portion is ignored
computed_progress = Column(Float, default=0.0) #calculated
user_id = Column(Integer, ForeignKey("user.id"))
complete_count = Column(Integer, default = 0) #raw
events = relationship("Event", backref="day", lazy="dynamic")
def __repr__(self):
return "<Day {}>".format(self.date.strftime("%Y-%m-%d"))
class Event(Base):
"""
Represents a WEconnect event (an activity on a particular date).
"""
__tablename__ = "event"
id = Column(Integer, primary_key=True)
eid = Column(String, index=True)
start_time = Column(DateTime) # Date portion is ignored
end_time = Column(DateTime) # Date portion is ignored
completed = Column(Boolean)
day_id = Column(Integer, ForeignKey("day.id"))
activity_id = Column(Integer, ForeignKey("activity.wc_act_id"))
def __repr__(self):
output = "<Event '{}'>".format(self.eid)
return output
''' ---- EVENT LISTENERS ----'''
@event.listens_for(User, 'after_insert')
def db_update_handler(mapper, connection, target):
''' WHEN NEW USER IS ADDED, populate their events in the db (run poll & save)'''
print("(db)After insert triggered")
''' --------- ####
'''
# Set up the SQLAlchemy engine and connect it to the Sqlite database
#using poolclass=NullPool makes it so that the entire connection to the database is cut when the session is closed
#engine = create_engine(DB_REL_PATH, poolclass=NullPool)
def get_engine():
engine = create_engine(DB_PATH, poolclass=NullPool)
return engine
def get_metadata():
engine = get_engine()
Base.metadata.reflect(engine)
return Base.metadata
def get_session():
engine = get_engine()
DBSession = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
db_session = DBSession()
return db_session
def close_connection(session):
session.close()
#SETUP - RUN ONCE
def setup_db():
'''
For some reason this doesn't work, so use
/createDB route via flask instead
'''
mt = MetaData()
Base = declarative_base(metadata=mt)
engine = get_engine()
Base.metadata.create_all(engine)
#TEST
def printTables():
''' So you can see the database tables in pt_data.db
See models.py for tables written out in class format
'''
metadata = get_metadata()
User = metadata.tables['user']
Day = metadata.tables['day']
Activity = metadata.tables['activity']
Event = metadata.tables['event']
[print(c.name) for c in Day.columns]
[print(d.name) for d in User.columns]
[print(e.name) for e in Event.columns]
[print(f.name) for f in Activity.columns]
#CLEAR
def clear_db(username, session):
user = session.query(User).filter_by(username=username).first()
if user is None:
print("User {} does not exist".format(username))
return
all_days = session.query(Day).filter_by(user_id=user.id)#.all()
all_activities = session.query(Activity).filter_by(user_id=user.wc_id or 0)#.all()
all_events = session.query(Event).filter(Event.activity in all_activities)#.all()
all_errors = session.query(Error).filter_by(user_id=user.id)#.all()
all_errors.delete()
all_events.delete()
all_activities.delete()
all_days.delete()
session.delete(user)
try:
session.commit()
except:
print("Could not delete {}".format(username))
session.rollback()
if __name__ == "__main__":
print("Running new database as main")
printTables()
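# Illustrative usage sketch (not part of the background jobs): a typical
# read/modify/commit cycle using the session helpers above. The username is an
# assumption made for this example.
def example_increment_today(username):
    session = get_session()
    try:
        user = session.query(User).filter_by(username=username).first()
        if user is None:
            return
        today = user.thisday()
        if today is not None:
            today.complete_count += 1
            session.commit()
    finally:
        close_connection(session)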
| mit | -7,145,989,785,779,802,000 | 28.110169 | 114 | 0.703202 | false |
Aeron/django-evolution | django_evolution/evolve.py | 6 | 2860 | import os
from django_evolution import EvolutionException, is_multi_db
from django_evolution.builtin_evolutions import BUILTIN_SEQUENCES
from django_evolution.models import Evolution
from django_evolution.mutations import SQLMutation
def get_evolution_sequence(app):
"Obtain the full evolution sequence for an application"
app_name = '.'.join(app.__name__.split('.')[:-1])
if app_name in BUILTIN_SEQUENCES:
return BUILTIN_SEQUENCES[app_name]
try:
evolution_module = __import__(app_name + '.evolutions',{},{},[''])
return evolution_module.SEQUENCE
except:
return []
def get_unapplied_evolutions(app, database):
"Obtain the list of unapplied evolutions for an application"
sequence = get_evolution_sequence(app)
app_label = app.__name__.split('.')[-2]
evolutions = Evolution.objects.filter(app_label=app_label)
if is_multi_db():
evolutions = evolutions.using(database)
applied = [evo.label for evo in evolutions]
return [seq for seq in sequence if seq not in applied]
def get_mutations(app, evolution_labels, database):
"""
Obtain the list of mutations described by the named evolutions.
"""
# For each item in the evolution sequence. Check each item to see if it is
# a python file or an sql file.
try:
app_name = '.'.join(app.__name__.split('.')[:-1])
if app_name in BUILTIN_SEQUENCES:
module_name = 'django_evolution.builtin_evolutions'
else:
module_name = '%s.evolutions' % app_name
evolution_module = __import__(module_name, {}, {}, [''])
except ImportError:
return []
mutations = []
for label in evolution_labels:
directory_name = os.path.dirname(evolution_module.__file__)
# The first element is used for compatibility purposes.
filenames = [
os.path.join(directory_name, label + '.sql'),
os.path.join(directory_name, "%s_%s.sql" % (database, label)),
]
found = False
for filename in filenames:
if os.path.exists(filename):
sql = []
                sql_file = open(filename)
for line in sql_file:
sql.append(line)
mutations.append(SQLMutation(label, sql))
found = True
break
if not found:
try:
module_name = [evolution_module.__name__, label]
module = __import__('.'.join(module_name),
{}, {}, [module_name]);
mutations.extend(module.MUTATIONS)
except ImportError:
raise EvolutionException(
'Error: Failed to find an SQL or Python evolution named %s'
% label)
return mutations
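# Illustrative helper (not part of django_evolution): one way a management
# command might combine the functions above. The django.db.models.loading
# import reflects the old-style Django app cache this module targets; the app
# label and database alias are assumptions made for this example.
def get_pending_mutations(app_label, database='default'):
    """Return the unapplied mutation objects for a single installed app."""
    from django.db.models.loading import get_app
    app = get_app(app_label)
    evolution_labels = get_unapplied_evolutions(app, database)
    return get_mutations(app, evolution_labels, database)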
| bsd-3-clause | 5,478,306,128,026,732,000 | 29.752688 | 79 | 0.586014 | false |
sahiljain/catapult | third_party/gsutil/third_party/protorpc/protorpc/remote.py | 19 | 39078 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Remote service library.
This module contains classes that are useful for building remote services that
conform to a standard request and response model. To conform to this model
a service must be like the following class:
# Each service instance only handles a single request and is then discarded.
# Make these objects light weight.
class Service(object):
# It must be possible to construct service objects without any parameters.
# If your constructor needs extra information you should provide a
# no-argument factory function to create service instances.
def __init__(self):
...
# Each remote method must use the 'method' decorator, passing the request
# and response message types. The remote method itself must take a single
# parameter which is an instance of RequestMessage and return an instance
# of ResponseMessage.
@method(RequestMessage, ResponseMessage)
def remote_method(self, request):
# Return an instance of ResponseMessage.
# A service object may optionally implement an 'initialize_request_state'
# method that takes as a parameter a single instance of a RequestState. If
# a service does not implement this method it will not receive the request
# state.
def initialize_request_state(self, state):
...
The 'Service' class is provided as a convenient base class that provides the
above functionality. It implements all required and optional methods for a
service. It also has convenience methods for creating factory functions that
can pass persistent global state to a new service instance.
The 'method' decorator is used to declare which methods of a class are
meant to service RPCs. While this decorator is not responsible for handling
actual remote method invocations, such as handling sockets, handling various
RPC protocols and checking messages for correctness, it does attach information
to methods that responsible classes can examine and ensure the correctness
of the RPC.
When the method decorator is used on a method, the wrapper method will have a
'remote' property associated with it. The 'remote' property contains the
request_type and response_type expected by the methods implementation.
On its own, the method decorator does not provide any support for subclassing
remote methods. In order to extend a service, one would need to redecorate
the sub-classes methods. For example:
class MyService(Service):
@method(DoSomethingRequest, DoSomethingResponse)
def do_stuff(self, request):
... implement do_stuff ...
class MyBetterService(MyService):
@method(DoSomethingRequest, DoSomethingResponse)
def do_stuff(self, request):
response = super(MyBetterService, self).do_stuff.remote.method(request)
... do stuff with response ...
return response
A Service subclass also has a Stub class that can be used with a transport for
making RPCs. When a stub is created, it is capable of doing both synchronous
and asynchronous RPCs if the underlying transport supports it. To make a stub
using an HTTP transport do:
my_service = MyService.Stub(HttpTransport('<my service URL>'))
For synchronous calls, just call the expected methods on the service stub:
request = DoSomethingRequest()
...
response = my_service.do_something(request)
Each stub instance has an async object that can be used for initiating
asynchronous RPCs if the underlying protocol transport supports it. To
make an asynchronous call, do:
rpc = my_service.async.do_something(request)
response = rpc.get_response()
"""
from __future__ import with_statement
import six
__author__ = '[email protected] (Rafe Kaplan)'
import logging
import sys
import threading
from wsgiref import headers as wsgi_headers
from . import message_types
from . import messages
from . import protobuf
from . import protojson
from . import util
__all__ = [
'ApplicationError',
'MethodNotFoundError',
'NetworkError',
'RequestError',
'RpcError',
'ServerError',
'ServiceConfigurationError',
'ServiceDefinitionError',
'HttpRequestState',
'ProtocolConfig',
'Protocols',
'RequestState',
'RpcState',
'RpcStatus',
'Service',
'StubBase',
'check_rpc_status',
'get_remote_method_info',
'is_error_status',
'method',
'remote',
]
class ServiceDefinitionError(messages.Error):
"""Raised when a service is improperly defined."""
class ServiceConfigurationError(messages.Error):
"""Raised when a service is incorrectly configured."""
# TODO: Use error_name to map to specific exception message types.
class RpcStatus(messages.Message):
"""Status of on-going or complete RPC.
Fields:
state: State of RPC.
error_name: Error name set by application. Only set when
status is APPLICATION_ERROR. For use by application to transmit
specific reason for error.
error_message: Error message associated with status.
"""
class State(messages.Enum):
"""Enumeration of possible RPC states.
Values:
OK: Completed successfully.
RUNNING: Still running, not complete.
REQUEST_ERROR: Request was malformed or incomplete.
SERVER_ERROR: Server experienced an unexpected error.
      NETWORK_ERROR: An error occurred on the network.
APPLICATION_ERROR: The application is indicating an error.
When in this state, RPC should also set application_error.
"""
OK = 0
RUNNING = 1
REQUEST_ERROR = 2
SERVER_ERROR = 3
NETWORK_ERROR = 4
APPLICATION_ERROR = 5
METHOD_NOT_FOUND_ERROR = 6
state = messages.EnumField(State, 1, required=True)
error_message = messages.StringField(2)
error_name = messages.StringField(3)
RpcState = RpcStatus.State
class RpcError(messages.Error):
"""Base class for RPC errors.
Each sub-class of RpcError is associated with an error value from RpcState
and has an attribute STATE that refers to that value.
"""
def __init__(self, message, cause=None):
super(RpcError, self).__init__(message)
self.cause = cause
@classmethod
def from_state(cls, state):
"""Get error class from RpcState.
Args:
state: RpcState value. Can be enum value itself, string or int.
Returns:
Exception class mapped to value if state is an error. Returns None
if state is OK or RUNNING.
"""
return _RPC_STATE_TO_ERROR.get(RpcState(state))
class RequestError(RpcError):
"""Raised when wrong request objects received during method invocation."""
STATE = RpcState.REQUEST_ERROR
class MethodNotFoundError(RequestError):
"""Raised when unknown method requested by RPC."""
STATE = RpcState.METHOD_NOT_FOUND_ERROR
class NetworkError(RpcError):
"""Raised when network error occurs during RPC."""
STATE = RpcState.NETWORK_ERROR
class ServerError(RpcError):
"""Unexpected error occured on server."""
STATE = RpcState.SERVER_ERROR
class ApplicationError(RpcError):
"""Raised for application specific errors.
Attributes:
error_name: Application specific error name for exception.
"""
STATE = RpcState.APPLICATION_ERROR
def __init__(self, message, error_name=None):
"""Constructor.
Args:
message: Application specific error message.
error_name: Application specific error name. Must be None, string
or unicode string.
"""
super(ApplicationError, self).__init__(message)
self.error_name = error_name
def __str__(self):
return self.args[0]
def __repr__(self):
if self.error_name is None:
error_format = ''
else:
error_format = ', %r' % self.error_name
return '%s(%r%s)' % (type(self).__name__, self.args[0], error_format)
_RPC_STATE_TO_ERROR = {
RpcState.REQUEST_ERROR: RequestError,
RpcState.NETWORK_ERROR: NetworkError,
RpcState.SERVER_ERROR: ServerError,
RpcState.APPLICATION_ERROR: ApplicationError,
RpcState.METHOD_NOT_FOUND_ERROR: MethodNotFoundError,
}
class _RemoteMethodInfo(object):
"""Object for encapsulating remote method information.
An instance of this method is associated with the 'remote' attribute
of the methods 'invoke_remote_method' instance.
Instances of this class are created by the remote decorator and should not
be created directly.
"""
def __init__(self,
method,
request_type,
response_type):
"""Constructor.
Args:
method: The method which implements the remote method. This is a
function that will act as an instance method of a class definition
that is decorated by '@method'. It must always take 'self' as its
first parameter.
request_type: Expected request type for the remote method.
response_type: Expected response type for the remote method.
"""
self.__method = method
self.__request_type = request_type
self.__response_type = response_type
@property
def method(self):
"""Original undecorated method."""
return self.__method
@property
def request_type(self):
"""Expected request type for remote method."""
if isinstance(self.__request_type, six.string_types):
self.__request_type = messages.find_definition(
self.__request_type,
relative_to=sys.modules[self.__method.__module__])
return self.__request_type
@property
def response_type(self):
"""Expected response type for remote method."""
if isinstance(self.__response_type, six.string_types):
self.__response_type = messages.find_definition(
self.__response_type,
relative_to=sys.modules[self.__method.__module__])
return self.__response_type
def method(request_type=message_types.VoidMessage,
response_type=message_types.VoidMessage):
"""Method decorator for creating remote methods.
Args:
request_type: Message type of expected request.
response_type: Message type of expected response.
Returns:
'remote_method_wrapper' function.
Raises:
TypeError: if the request_type or response_type parameters are not
proper subclasses of messages.Message.
"""
if (not isinstance(request_type, six.string_types) and
(not isinstance(request_type, type) or
not issubclass(request_type, messages.Message) or
request_type is messages.Message)):
raise TypeError(
'Must provide message class for request-type. Found %s',
request_type)
if (not isinstance(response_type, six.string_types) and
(not isinstance(response_type, type) or
not issubclass(response_type, messages.Message) or
response_type is messages.Message)):
raise TypeError(
'Must provide message class for response-type. Found %s',
response_type)
def remote_method_wrapper(method):
"""Decorator used to wrap method.
Args:
method: Original method being wrapped.
Returns:
'invoke_remote_method' function responsible for actual invocation.
This invocation function instance is assigned an attribute 'remote'
which contains information about the remote method:
request_type: Expected request type for remote method.
response_type: Response type returned from remote method.
Raises:
TypeError: If request_type or response_type is not a subclass of Message
or is the Message class itself.
"""
def invoke_remote_method(service_instance, request):
"""Function used to replace original method.
Invoke wrapped remote method. Checks to ensure that request and
response objects are the correct types.
Does not check whether messages are initialized.
Args:
service_instance: The service object whose method is being invoked.
This is passed to 'self' during the invocation of the original
method.
request: Request message.
Returns:
Results of calling wrapped remote method.
Raises:
RequestError: Request object is not of the correct type.
ServerError: Response object is not of the correct type.
"""
if not isinstance(request, remote_method_info.request_type):
raise RequestError('Method %s.%s expected request type %s, '
'received %s' %
(type(service_instance).__name__,
method.__name__,
remote_method_info.request_type,
type(request)))
response = method(service_instance, request)
if not isinstance(response, remote_method_info.response_type):
raise ServerError('Method %s.%s expected response type %s, '
'sent %s' %
(type(service_instance).__name__,
method.__name__,
remote_method_info.response_type,
type(response)))
return response
remote_method_info = _RemoteMethodInfo(method,
request_type,
response_type)
invoke_remote_method.remote = remote_method_info
invoke_remote_method.__name__ = method.__name__
return invoke_remote_method
return remote_method_wrapper
def remote(request_type, response_type):
"""Temporary backward compatibility alias for method."""
logging.warning('The remote decorator has been renamed method. It will be '
'removed in very soon from future versions of ProtoRPC.')
return method(request_type, response_type)
def get_remote_method_info(method):
"""Get remote method info object from remote method.
Returns:
Remote method info object if method is a remote method, else None.
"""
if not callable(method):
return None
try:
method_info = method.remote
except AttributeError:
return None
if not isinstance(method_info, _RemoteMethodInfo):
return None
return method_info
class StubBase(object):
"""Base class for client side service stubs.
The remote method stubs are created by the _ServiceClass meta-class
when a Service class is first created. The resulting stub will
extend both this class and the service class it handles communications for.
Assume that there is a service:
class NewContactRequest(messages.Message):
name = messages.StringField(1, required=True)
phone = messages.StringField(2)
email = messages.StringField(3)
class NewContactResponse(message.Message):
contact_id = messages.StringField(1)
class AccountService(remote.Service):
@remote.method(NewContactRequest, NewContactResponse):
def new_contact(self, request):
... implementation ...
A stub of this service can be called in two ways. The first is to pass in a
correctly initialized NewContactRequest message:
request = NewContactRequest()
request.name = 'Bob Somebody'
request.phone = '+1 415 555 1234'
response = account_service_stub.new_contact(request)
The second way is to pass in keyword parameters that correspond with the root
request message type:
account_service_stub.new_contact(name='Bob Somebody',
phone='+1 415 555 1234')
The second form will create a request message of the appropriate type.
"""
def __init__(self, transport):
"""Constructor.
Args:
transport: Underlying transport to communicate with remote service.
"""
self.__transport = transport
@property
def transport(self):
"""Transport used to communicate with remote service."""
return self.__transport
class _ServiceClass(type):
"""Meta-class for service class."""
def __new_async_method(cls, remote):
"""Create asynchronous method for Async handler.
Args:
remote: RemoteInfo to create method for.
"""
def async_method(self, *args, **kwargs):
"""Asynchronous remote method.
Args:
self: Instance of StubBase.Async subclass.
Stub methods either take a single positional argument when a full
request message is passed in, or keyword arguments, but not both.
See docstring for StubBase for more information on how to use remote
stub methods.
Returns:
Rpc instance used to represent asynchronous RPC.
"""
if args and kwargs:
raise TypeError('May not provide both args and kwargs')
if not args:
# Construct request object from arguments.
request = remote.request_type()
for name, value in six.iteritems(kwargs):
setattr(request, name, value)
else:
# First argument is request object.
request = args[0]
return self.transport.send_rpc(remote, request)
async_method.__name__ = remote.method.__name__
async_method = util.positional(2)(async_method)
async_method.remote = remote
return async_method
def __new_sync_method(cls, async_method):
"""Create synchronous method for stub.
Args:
async_method: asynchronous method to delegate calls to.
"""
def sync_method(self, *args, **kwargs):
"""Synchronous remote method.
Args:
self: Instance of StubBase.Async subclass.
args: Tuple (request,):
request: Request object.
kwargs: Field values for request. Must be empty if request object
is provided.
Returns:
Response message from synchronized RPC.
"""
return async_method(self.async, *args, **kwargs).response
sync_method.__name__ = async_method.__name__
sync_method.remote = async_method.remote
return sync_method
def __create_async_methods(cls, remote_methods):
"""Construct a dictionary of asynchronous methods based on remote methods.
Args:
remote_methods: Dictionary of methods with associated RemoteInfo objects.
Returns:
      Dictionary of asynchronous methods with associated RemoteInfo objects.
Results added to AsyncStub subclass.
"""
async_methods = {}
for method_name, method in remote_methods.items():
async_methods[method_name] = cls.__new_async_method(method.remote)
return async_methods
def __create_sync_methods(cls, async_methods):
"""Construct a dictionary of synchronous methods based on remote methods.
Args:
async_methods: Dictionary of async methods to delegate calls to.
Returns:
      Dictionary of synchronous methods with associated RemoteInfo objects.
Results added to Stub subclass.
"""
sync_methods = {}
for method_name, async_method in async_methods.items():
sync_methods[method_name] = cls.__new_sync_method(async_method)
return sync_methods
def __new__(cls, name, bases, dct):
"""Instantiate new service class instance."""
if StubBase not in bases:
# Collect existing remote methods.
base_methods = {}
for base in bases:
try:
remote_methods = base.__remote_methods
except AttributeError:
pass
else:
base_methods.update(remote_methods)
# Set this class private attribute so that base_methods do not have
# to be recacluated in __init__.
dct['_ServiceClass__base_methods'] = base_methods
for attribute, value in dct.items():
base_method = base_methods.get(attribute, None)
if base_method:
if not callable(value):
raise ServiceDefinitionError(
'Must override %s in %s with a method.' % (
attribute, name))
if get_remote_method_info(value):
raise ServiceDefinitionError(
'Do not use method decorator when overloading remote method %s '
'on service %s.' %
(attribute, name))
base_remote_method_info = get_remote_method_info(base_method)
remote_decorator = method(
base_remote_method_info.request_type,
base_remote_method_info.response_type)
new_remote_method = remote_decorator(value)
dct[attribute] = new_remote_method
return type.__new__(cls, name, bases, dct)
def __init__(cls, name, bases, dct):
"""Create uninitialized state on new class."""
type.__init__(cls, name, bases, dct)
# Only service implementation classes should have remote methods and stub
# sub classes created. Stub implementations have their own methods passed
# in to the type constructor.
if StubBase not in bases:
# Create list of remote methods.
cls.__remote_methods = dict(cls.__base_methods)
for attribute, value in dct.items():
value = getattr(cls, attribute)
remote_method_info = get_remote_method_info(value)
if remote_method_info:
cls.__remote_methods[attribute] = value
# Build asynchronous stub class.
stub_attributes = {'Service': cls}
async_methods = cls.__create_async_methods(cls.__remote_methods)
stub_attributes.update(async_methods)
async_class = type('AsyncStub', (StubBase, cls), stub_attributes)
cls.AsyncStub = async_class
# Constructor for synchronous stub class.
def __init__(self, transport):
"""Constructor.
Args:
transport: Underlying transport to communicate with remote service.
"""
super(cls.Stub, self).__init__(transport)
self.async = cls.AsyncStub(transport)
# Build synchronous stub class.
stub_attributes = {'Service': cls,
'__init__': __init__}
stub_attributes.update(cls.__create_sync_methods(async_methods))
cls.Stub = type('Stub', (StubBase, cls), stub_attributes)
@staticmethod
def all_remote_methods(cls):
"""Get all remote methods of service.
Returns:
Dict from method name to unbound method.
"""
return dict(cls.__remote_methods)
class RequestState(object):
"""Request state information.
Properties:
remote_host: Remote host name where request originated.
remote_address: IP address where request originated.
server_host: Host of server within which service resides.
    server_port: Port on which the service received the request.
"""
@util.positional(1)
def __init__(self,
remote_host=None,
remote_address=None,
server_host=None,
server_port=None):
"""Constructor.
Args:
remote_host: Assigned to property.
remote_address: Assigned to property.
server_host: Assigned to property.
server_port: Assigned to property.
"""
self.__remote_host = remote_host
self.__remote_address = remote_address
self.__server_host = server_host
self.__server_port = server_port
@property
def remote_host(self):
return self.__remote_host
@property
def remote_address(self):
return self.__remote_address
@property
def server_host(self):
return self.__server_host
@property
def server_port(self):
return self.__server_port
def _repr_items(self):
for name in ['remote_host',
'remote_address',
'server_host',
'server_port']:
yield name, getattr(self, name)
def __repr__(self):
"""String representation of state."""
state = [self.__class__.__name__]
for name, value in self._repr_items():
if value:
state.append('%s=%r' % (name, value))
return '<%s>' % (' '.join(state),)
class HttpRequestState(RequestState):
"""HTTP request state information.
NOTE: Does not attempt to represent certain types of information from the
request such as the query string as query strings are not permitted in
ProtoRPC URLs unless required by the underlying message format.
Properties:
headers: wsgiref.headers.Headers instance of HTTP request headers.
http_method: HTTP method as a string.
service_path: Path on HTTP service where service is mounted. This path
will not include the remote method name.
"""
@util.positional(1)
def __init__(self,
http_method=None,
service_path=None,
headers=None,
**kwargs):
"""Constructor.
Args:
Same as RequestState, including:
http_method: Assigned to property.
service_path: Assigned to property.
headers: HTTP request headers. If instance of Headers, assigned to
property without copying. If dict, will convert to name value pairs
for use with Headers constructor. Otherwise, passed as parameters to
Headers constructor.
"""
super(HttpRequestState, self).__init__(**kwargs)
self.__http_method = http_method
self.__service_path = service_path
# Initialize headers.
if isinstance(headers, dict):
header_list = []
for key, value in sorted(headers.items()):
if not isinstance(value, list):
value = [value]
for item in value:
header_list.append((key, item))
headers = header_list
self.__headers = wsgi_headers.Headers(headers or [])
@property
def http_method(self):
return self.__http_method
@property
def service_path(self):
return self.__service_path
@property
def headers(self):
return self.__headers
def _repr_items(self):
for item in super(HttpRequestState, self)._repr_items():
yield item
for name in ['http_method', 'service_path']:
yield name, getattr(self, name)
yield 'headers', list(self.headers.items())
class Service(six.with_metaclass(_ServiceClass, object)):
"""Service base class.
Base class used for defining remote services. Contains reflection functions,
useful helpers and built-in remote methods.
Services are expected to be constructed via either a constructor or factory
which takes no parameters. However, it might be required that some state or
configuration is passed in to a service across multiple requests.
To do this, define parameters to the constructor of the service and use
the 'new_factory' class method to build a constructor that will transmit
parameters to the constructor. For example:
class MyService(Service):
def __init__(self, configuration, state):
self.configuration = configuration
self.state = state
configuration = MyServiceConfiguration()
global_state = MyServiceState()
my_service_factory = MyService.new_factory(configuration,
state=global_state)
The contract with any service handler is that a new service object is created
to handle each user request, and that the construction does not take any
parameters. The factory satisfies this condition:
new_instance = my_service_factory()
assert new_instance.state is global_state
Attributes:
request_state: RequestState set via initialize_request_state.
"""
__request_state = None
@classmethod
def all_remote_methods(cls):
"""Get all remote methods for service class.
Built-in methods do not appear in the dictionary of remote methods.
Returns:
Dictionary mapping method name to remote method.
"""
return _ServiceClass.all_remote_methods(cls)
@classmethod
def new_factory(cls, *args, **kwargs):
"""Create factory for service.
Useful for passing configuration or state objects to the service. Accepts
    arbitrary parameters and keywords; however, the underlying service must not
    require any other parameters in its constructor.
Args:
args: Args to pass to service constructor.
kwargs: Keyword arguments to pass to service constructor.
Returns:
Factory function that will create a new instance and forward args and
keywords to the constructor.
"""
def service_factory():
return cls(*args, **kwargs)
# Update docstring so that it is easier to debug.
full_class_name = '%s.%s' % (cls.__module__, cls.__name__)
service_factory.__doc__ = (
'Creates new instances of service %s.\n\n'
'Returns:\n'
' New instance of %s.'
% (cls.__name__, full_class_name))
# Update name so that it is easier to debug the factory function.
service_factory.__name__ = '%s_service_factory' % cls.__name__
service_factory.service_class = cls
return service_factory
def initialize_request_state(self, request_state):
"""Save request state for use in remote method.
Args:
request_state: RequestState instance.
"""
self.__request_state = request_state
@classmethod
def definition_name(cls):
"""Get definition name for Service class.
Package name is determined by the global 'package' attribute in the
module that contains the Service definition. If no 'package' attribute
is available, uses module name. If no module is found, just uses class
name as name.
Returns:
Fully qualified service name.
"""
try:
return cls.__definition_name
except AttributeError:
outer_definition_name = cls.outer_definition_name()
if outer_definition_name is None:
cls.__definition_name = cls.__name__
else:
cls.__definition_name = '%s.%s' % (outer_definition_name, cls.__name__)
return cls.__definition_name
@classmethod
def outer_definition_name(cls):
"""Get outer definition name.
Returns:
Package for service. Services are never nested inside other definitions.
"""
return cls.definition_package()
@classmethod
def definition_package(cls):
"""Get package for service.
Returns:
Package name for service.
"""
try:
return cls.__definition_package
except AttributeError:
cls.__definition_package = util.get_package_for_module(cls.__module__)
return cls.__definition_package
@property
def request_state(self):
"""Request state associated with this Service instance."""
return self.__request_state
def is_error_status(status):
"""Function that determines whether the RPC status is an error.
Args:
status: Initialized RpcStatus message to check for errors.
"""
status.check_initialized()
return RpcError.from_state(status.state) is not None
def check_rpc_status(status):
"""Function converts an error status to a raised exception.
Args:
status: Initialized RpcStatus message to check for errors.
Raises:
RpcError according to state set on status, if it is an error state.
"""
status.check_initialized()
error_class = RpcError.from_state(status.state)
if error_class is not None:
if error_class is ApplicationError:
raise error_class(status.error_message, status.error_name)
else:
raise error_class(status.error_message)
class ProtocolConfig(object):
"""Configuration for single protocol mapping.
A read-only protocol configuration provides a given protocol implementation
with a name and a set of content-types that it recognizes.
Properties:
protocol: The protocol implementation for configuration (usually a module,
for example, protojson, protobuf, etc.). This is an object that has the
following attributes:
CONTENT_TYPE: Used as the default content-type if default_content_type
is not set.
ALTERNATIVE_CONTENT_TYPES (optional): A list of alternative
content-types to the default that indicate the same protocol.
encode_message: Function that matches the signature of
ProtocolConfig.encode_message. Used for encoding a ProtoRPC message.
decode_message: Function that matches the signature of
ProtocolConfig.decode_message. Used for decoding a ProtoRPC message.
name: Name of protocol configuration.
default_content_type: The default content type for the protocol. Overrides
CONTENT_TYPE defined on protocol.
alternative_content_types: A list of alternative content-types supported
by the protocol. Must not contain the default content-type, nor
      duplicates. Overrides ALTERNATIVE_CONTENT_TYPES defined on protocol.
content_types: A list of all content-types supported by configuration.
Combination of default content-type and alternatives.
"""
def __init__(self,
protocol,
name,
default_content_type=None,
alternative_content_types=None):
"""Constructor.
Args:
protocol: The protocol implementation for configuration.
name: The name of the protocol configuration.
default_content_type: The default content-type for protocol. If none
provided it will check protocol.CONTENT_TYPE.
alternative_content_types: A list of content-types. If none provided,
it will check protocol.ALTERNATIVE_CONTENT_TYPES. If that attribute
does not exist, will be an empty tuple.
Raises:
ServiceConfigurationError if there are any duplicate content-types.
"""
self.__protocol = protocol
self.__name = name
self.__default_content_type = (default_content_type or
protocol.CONTENT_TYPE).lower()
if alternative_content_types is None:
alternative_content_types = getattr(protocol,
'ALTERNATIVE_CONTENT_TYPES',
())
self.__alternative_content_types = tuple(
content_type.lower() for content_type in alternative_content_types)
self.__content_types = (
(self.__default_content_type,) + self.__alternative_content_types)
# Detect duplicate content types in definition.
previous_type = None
for content_type in sorted(self.content_types):
if content_type == previous_type:
raise ServiceConfigurationError(
'Duplicate content-type %s' % content_type)
previous_type = content_type
@property
def protocol(self):
return self.__protocol
@property
def name(self):
return self.__name
@property
def default_content_type(self):
return self.__default_content_type
@property
def alternate_content_types(self):
return self.__alternative_content_types
@property
def content_types(self):
return self.__content_types
def encode_message(self, message):
"""Encode message.
Args:
message: Message instance to encode.
Returns:
String encoding of Message instance encoded in protocol's format.
"""
return self.__protocol.encode_message(message)
def decode_message(self, message_type, encoded_message):
"""Decode buffer to Message instance.
Args:
message_type: Message type to decode data to.
encoded_message: Encoded version of message as string.
Returns:
Decoded instance of message_type.
"""
return self.__protocol.decode_message(message_type, encoded_message)
class Protocols(object):
"""Collection of protocol configurations.
Used to describe a complete set of content-type mappings for multiple
protocol configurations.
Properties:
names: Sorted list of the names of registered protocols.
content_types: Sorted list of supported content-types.
"""
__default_protocols = None
__lock = threading.Lock()
def __init__(self):
"""Constructor."""
self.__by_name = {}
self.__by_content_type = {}
def add_protocol_config(self, config):
"""Add a protocol configuration to protocol mapping.
Args:
config: A ProtocolConfig.
Raises:
ServiceConfigurationError if protocol.name is already registered
        or any of its content-types are already registered.
"""
if config.name in self.__by_name:
raise ServiceConfigurationError(
'Protocol name %r is already in use' % config.name)
for content_type in config.content_types:
if content_type in self.__by_content_type:
raise ServiceConfigurationError(
'Content type %r is already in use' % content_type)
self.__by_name[config.name] = config
self.__by_content_type.update((t, config) for t in config.content_types)
def add_protocol(self, *args, **kwargs):
"""Add a protocol configuration from basic parameters.
    Simple helper method that creates and registers a ProtocolConfig instance.
"""
self.add_protocol_config(ProtocolConfig(*args, **kwargs))
@property
def names(self):
return tuple(sorted(self.__by_name))
@property
def content_types(self):
return tuple(sorted(self.__by_content_type))
def lookup_by_name(self, name):
"""Look up a ProtocolConfig by name.
Args:
name: Name of protocol to look for.
Returns:
ProtocolConfig associated with name.
Raises:
KeyError if there is no protocol for name.
"""
return self.__by_name[name.lower()]
def lookup_by_content_type(self, content_type):
"""Look up a ProtocolConfig by content-type.
Args:
content_type: Content-type to find protocol configuration for.
Returns:
ProtocolConfig associated with content-type.
Raises:
KeyError if there is no protocol for content-type.
"""
return self.__by_content_type[content_type.lower()]
@classmethod
def new_default(cls):
"""Create default protocols configuration.
Returns:
New Protocols instance configured for protobuf and protorpc.
"""
protocols = cls()
protocols.add_protocol(protobuf, 'protobuf')
protocols.add_protocol(protojson.ProtoJson.get_default(), 'protojson')
return protocols
@classmethod
def get_default(cls):
"""Get the global default Protocols instance.
Returns:
Current global default Protocols instance.
"""
default_protocols = cls.__default_protocols
if default_protocols is None:
with cls.__lock:
default_protocols = cls.__default_protocols
if default_protocols is None:
default_protocols = cls.new_default()
cls.__default_protocols = default_protocols
return default_protocols
@classmethod
def set_default(cls, protocols):
"""Set the global default Protocols instance.
Args:
protocols: A Protocols instance.
Raises:
TypeError: If protocols is not an instance of Protocols.
"""
if not isinstance(protocols, Protocols):
raise TypeError(
'Expected value of type "Protocols", found %r' % protocols)
with cls.__lock:
cls.__default_protocols = protocols
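# Usage sketch for Protocols, assuming the registrations made by new_default():
#   protocols = Protocols.get_default()
#   protocols.names                                    # e.g. ('protobuf', 'protojson')
#   config = protocols.lookup_by_content_type('application/json')
#   config.name                                        # e.g. 'protojson'
# Unknown names or content-types raise KeyError, as documented above.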
| bsd-3-clause | 7,059,193,963,409,829,000 | 30.33761 | 79 | 0.6746 | false |
diorcety/intellij-community | python/lib/Lib/site-packages/django/conf/locale/id/formats.py | 78 | 1855 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G:i:s"
TIME_FORMAT = 'G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G:i:s'
FIRST_DAY_OF_WEEK = 1 #Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d-%m-%y', '%d/%m/%y',    # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y',    # '25-10-2009', '25/10/2009'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H:%M:%S', # '25-10-2009 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2009 14:30'
'%d-%m-%Y', # '25-10-2009'
    '%d-%m-%y %H:%M:%S',   # '25-10-09 14:30:59'
    '%d-%m-%y %H:%M',      # '25-10-09 14:30'
    '%d-%m-%y',            # '25-10-09'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
    '%m/%d/%Y %H:%M:%S',   # '10/25/2009 14:30:59'
    '%m/%d/%Y %H:%M',      # '10/25/2009 14:30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
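# Illustrative check of the first DATE_INPUT_FORMATS entry (standard library only):
#   >>> import datetime
#   >>> datetime.datetime.strptime('25-10-09', '%d-%m-%y').date()
#   datetime.date(2009, 10, 25)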
| apache-2.0 | -982,166,138,323,838,000 | 38.468085 | 79 | 0.442588 | false |
reshadh/Keepnote-LaTeX | dist/keepnote-0.7.5.win/extensions/latex_prompt/editor_richtext.py | 3 | 57757 | """
KeepNote
Editor widget in main window
"""
#
# KeepNote
# Copyright (c) 2008-2011 Matt Rasmussen
# Author: Matt Rasmussen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import gettext
import sys, os, re
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, is_url, unicode_gtk
from keepnote.notebook import \
NoteBookError, \
get_node_url, \
parse_node_url, \
is_node_url
from keepnote import notebook as notebooklib
from keepnote.gui import richtext
from keepnote.gui.richtext import \
RichTextView, RichTextBuffer, \
RichTextIO, RichTextError, RichTextImage
from keepnote.gui.richtext.richtext_tags import \
RichTextTagTable, RichTextLinkTag
from keepnote.gui.icons import \
get_node_icon, lookup_icon_filename
from keepnote.gui.font_selector import FontSelector
from keepnote.gui.colortool import FgColorTool, BgColorTool
from keepnote.gui.richtext.richtext_tags import color_tuple_to_string
from keepnote.gui.popupwindow import PopupWindow
from keepnote.gui.linkcomplete import LinkPickerPopup
from keepnote.gui.link_editor import LinkEditor
from keepnote.gui.editor import KeepNoteEditor
from keepnote.gui import extension
from keepnote.gui import dialog_app_options
import dialog_latex
from keepnote.gui import \
CONTEXT_MENU_ACCEL_PATH, \
DEFAULT_FONT, \
FileChooserDialog, \
get_pixbuf, \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
Action, \
ToggleAction, \
add_actions, \
update_file_preview, \
dialog_find, \
dialog_image_resize
_ = keepnote.translate
def is_relative_file(filename):
"""Returns True if filename is relative"""
return (not re.match("[^:/]+://", filename) and
not os.path.isabs(filename))
def is_local_file(filename):
return filename and ("/" not in filename) and ("\\" not in filename)
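# Illustrative behaviour of the two helpers above:
#   is_relative_file("images/pic.png")            # True: no scheme, not absolute
#   is_relative_file("http://example.com/p.png")  # False: matches "scheme://"
#   is_relative_file("/tmp/pic.png")              # False: absolute path
#   is_local_file("pic.png")                      # True: bare filename
#   is_local_file("images/pic.png")               # False: contains a path separator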
class NodeIO (RichTextIO):
"""Read/Writes the contents of a RichTextBuffer to disk"""
def __init__(self):
RichTextIO.__init__(self)
self._node = None
self._image_files = set()
self._saved_image_files = set()
def set_node(self, node):
self._node = node
def save(self, textbuffer, filename, title=None, stream=None):
"""Save buffer contents to file"""
RichTextIO.save(self, textbuffer, filename, title, stream=stream)
def load(self, textview, textbuffer, filename, stream=None):
RichTextIO.load(self, textview, textbuffer, filename, stream=stream)
def _load_images(self, textbuffer, html_filename):
"""Load images present in textbuffer"""
self._image_files.clear()
RichTextIO._load_images(self, textbuffer, html_filename)
def _save_images(self, textbuffer, html_filename):
"""Save images present in text buffer"""
# reset saved image set
self._saved_image_files.clear()
#print "save_images"
# don't allow the html file to be deleted
if html_filename:
self._saved_image_files.add(os.path.basename(html_filename))
RichTextIO._save_images(self, textbuffer, html_filename)
#print "done"
# delete images not part of the saved set
self._delete_images(html_filename,
self._image_files - self._saved_image_files)
self._image_files = set(self._saved_image_files)
def _delete_images(self, html_filename, image_files):
for image_file in image_files:
# only delete an image file if it is local
if is_local_file(image_file):
try:
self._node.delete_file(image_file)
except:
keepnote.log_error()
pass
def _load_image(self, textbuffer, image, html_filename):
# TODO: generalize url recognition
filename = image.get_filename()
if filename.startswith("http:/") or filename.startswith("file:/"):
image.set_from_url(filename)
elif is_relative_file(filename):
try:
infile = self._node.open_file(filename, mode="r") # rb
image.set_from_stream(infile)
infile.close()
except:
image.set_no_image()
else:
image.set_from_file(filename)
# record loaded images
self._image_files.add(image.get_filename())
def _save_image(self, textbuffer, image, html_filename):
if image.save_needed():
I=image._pixbuf_original
tag=I.get_option("tEXt::tag")
            # Check whether a tag is stored in the image; a tag marks a
            # LaTeX-generated object, while plain images carry no tag.
if tag==None:
# The following function writes the image to the file stream out
out = self._node.open_file(image.get_filename(), mode="w") # wb
image.write_stream(out, image.get_filename())
out.close()
else:
# using save function of pixbuf to save the image
path=self._node.get_path()
image_filename=image.get_filename()
image_path= os.path.join(path, image_filename)
self._save_latex(I,image_path)
# mark image as saved
self._saved_image_files.add(image.get_filename())
def _save_latex(self, pixbuffer, image_path):
tag=pixbuffer.get_option("tEXt::tag")
caption=pixbuffer.get_option("tEXt::caption")
tex=pixbuffer.get_option("tEXt::tex")
label=pixbuffer.get_option("tEXt::label")
if label is None:
label=""
if tag is None:
tag="0"
if tex is None:
tex=""
if caption is None:
caption=""
pixbuffer.save(image_path, "png", {"tEXt::tag":tag,"tEXt::label":label,\
"tEXt::caption":caption,"tEXt::tex":tex})
class RichTextEditor (KeepNoteEditor):
def __init__(self, app):
KeepNoteEditor.__init__(self, app)
self._app = app
self._notebook = None
self._link_picker = None
self._maxlinks = 10 # maximum number of links to show in link picker
# state
self._page = None # current NoteBookPage
self._page_scrolls = {} # remember scroll in each page
self._page_cursors = {}
self._textview_io = NodeIO()
# editor
self.connect("make-link", self._on_make_link)
# textview and its callbacks
self._textview = RichTextView(RichTextBuffer(
self._app.get_richtext_tag_table())) # textview
self._textview.disable()
self._textview.connect("font-change", self._on_font_callback)
self._textview.connect("modified", self._on_modified_callback)
self._textview.connect("child-activated", self._on_child_activated)
self._textview.connect("visit-url", self._on_visit_url)
self._textview.get_buffer().connect("ending-user-action",
self._on_text_changed)
self._textview.connect("key-press-event", self._on_key_press_event)
# scrollbars
self._sw = gtk.ScrolledWindow()
self._sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self._sw.set_shadow_type(gtk.SHADOW_IN)
self._sw.add(self._textview)
self.pack_start(self._sw)
# link editor
self._link_editor = LinkEditor()
self._link_editor.set_textview(self._textview)
self._link_editor.set_search_nodes(self._search_nodes)
self.connect("font-change", self._link_editor.on_font_change)
self.pack_start(self._link_editor, False, True, 0)
self.make_image_menu(self._textview.get_image_menu())
# menus
self.editor_menus = EditorMenus(self._app, self)
self.connect("font-change", self.editor_menus.on_font_change)
# find dialog
self.find_dialog = dialog_find.KeepNoteFindDialog(self)
self.show_all()
def set_notebook(self, notebook):
"""Set notebook for editor"""
# set new notebook
self._notebook = notebook
if self._notebook:
self.load_notebook_preferences()
else:
# no new notebook, clear the view
self.clear_view()
def load_preferences(self, app_pref, first_open=False):
"""Load application preferences"""
self.editor_menus.enable_spell_check(
app_pref.get("editors", "general", "spell_check",
default=True))
self.load_notebook_preferences()
def save_preferences(self, app_pref):
"""Save application preferences"""
# record state in preferences
app_pref.set("editors", "general", "spell_check",
self._textview.is_spell_check_enabled())
def load_notebook_preferences(self):
"""Load notebook-specific preferences"""
if self._notebook:
# read default font
self._textview.set_default_font(
self._notebook.pref.get("default_font",
default=DEFAULT_FONT))
def is_focus(self):
"""Return True if text editor has focus"""
return self._textview.is_focus()
def grab_focus(self):
"""Pass focus to textview"""
self._textview.grab_focus()
def clear_view(self):
"""Clear editor view"""
self._page = None
self._textview.disable()
def undo(self):
"""Undo the last action in the viewer"""
self._textview.undo()
def redo(self):
"""Redo the last action in the viewer"""
self._textview.redo()
def view_pages(self, pages):
"""View a page in the editor"""
# editor cannot view multiple pages at once
# if asked to, it will view none
if len(pages) > 1:
pages = []
# save current page before changing pages
self.save()
self._save_cursor()
pages = [node for node in pages
if node.get_attr("content_type") ==
notebooklib.CONTENT_TYPE_PAGE]
if len(pages) == 0:
self.clear_view()
else:
page = pages[0]
self._page = page
self._textview.enable()
try:
self._textview_io.set_node(self._page)
self._textview_io.load(
self._textview,
self._textview.get_buffer(),
self._page.get_page_file(),
stream=self._page.open_file(
self._page.get_page_file(), "r", "utf-8"))
self._load_cursor()
except RichTextError, e:
self.clear_view()
self.emit("error", e.msg, e)
except Exception, e:
self.clear_view()
self.emit("error", "Unknown error", e)
if len(pages) > 0:
self.emit("view-node", pages[0])
def _save_cursor(self):
if self._page is not None:
it = self._textview.get_buffer().get_insert_iter()
self._page_cursors[self._page] = it.get_offset()
x, y = self._textview.window_to_buffer_coords(
gtk.TEXT_WINDOW_TEXT, 0, 0)
it = self._textview.get_iter_at_location(x, y)
self._page_scrolls[self._page] = it.get_offset()
def _load_cursor(self):
# place cursor in last location
if self._page in self._page_cursors:
offset = self._page_cursors[self._page]
it = self._textview.get_buffer().get_iter_at_offset(offset)
self._textview.get_buffer().place_cursor(it)
# place scroll in last position
if self._page in self._page_scrolls:
offset = self._page_scrolls[self._page]
buf = self._textview.get_buffer()
it = buf.get_iter_at_offset(offset)
mark = buf.create_mark(None, it, True)
self._textview.scroll_to_mark(mark,
0.49, use_align=True, xalign=0.0)
buf.delete_mark(mark)
def save(self):
"""Save the loaded page"""
if self._page is not None and \
self._page.is_valid() and \
self._textview.is_modified():
try:
# save text data
self._textview_io.save(
self._textview.get_buffer(),
self._page.get_page_file(),
self._page.get_title(),
stream=self._page.open_file(
self._page.get_page_file(), "w", "utf-8"))
# save meta data
self._page.set_attr_timestamp("modified_time")
self._page.save()
except RichTextError, e:
self.emit("error", e.msg, e)
except NoteBookError, e:
self.emit("error", e.msg, e)
def save_needed(self):
"""Returns True if textview is modified"""
return self._textview.is_modified()
def add_ui(self, window):
self._textview.set_accel_group(window.get_accel_group())
self._textview.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
self._textview.get_image_menu().set_accel_group(window.get_accel_group())
self.editor_menus.add_ui(window)
def remove_ui(self, window):
self.editor_menus.remove_ui(window)
#===========================================
# callbacks for textview
def _on_font_callback(self, textview, font):
"""Callback for textview font changed"""
self.emit("font-change", font)
self._check_link(False)
def _on_modified_callback(self, textview, modified):
"""Callback for textview modification"""
self.emit("modified", self._page, modified)
# make notebook node modified
if modified:
self._page.mark_modified()
self._page.notify_change(False)
def _on_child_activated(self, textview, child):
"""Callback for activation of textview child widget"""
self.emit("child-activated", textview, child)
def _on_text_changed(self, textview):
"""Callback for textview text change"""
self._check_link()
def _on_key_press_event(self, textview, event):
"""Callback for keypress in textview"""
# decide if keypress should be forwarded to link picker
if (self._link_picker and self._link_picker.shown() and
(event.keyval == gtk.keysyms.Down or
event.keyval == gtk.keysyms.Up or
event.keyval == gtk.keysyms.Return or
event.keyval == gtk.keysyms.Escape)):
return self._link_picker.on_key_press_event(textview, event)
def _on_visit_url(self, textview, url):
"""Callback for textview visiting a URL"""
if is_node_url(url):
host, nodeid = parse_node_url(url)
node = self._notebook.get_node_by_id(nodeid)
if node:
self.emit("visit-node", node)
else:
try:
self._app.open_webpage(url)
except KeepNoteError, e:
self.emit("error", e.msg, e)
def _on_make_link(self, editor):
"""Callback from editor to make a link"""
self._link_editor.edit()
#=====================================
# callback for link editor
def _search_nodes(self, text):
"""Return nodes with titles containing 'text'"""
# TODO: make proper interface
nodes = [(nodeid, title)
for nodeid, title in self._notebook.search_node_titles(text)]
return nodes
#======================================
# link auto-complete
def _check_link(self, popup=True):
"""Check whether complete should be shown for link under cursor"""
# get link
tag, start, end = self._textview.get_link()
if tag is not None and popup:
# perform node search
text = start.get_text(end)
results = []
# TODO: clean up icon handling.
for nodeid, title in self._notebook.search_node_titles(text)[:self._maxlinks]:
icon = self._notebook.get_attr_by_id(nodeid, "icon")
if icon is None:
icon = "note.png"
icon = lookup_icon_filename(self._notebook, icon)
if icon is None:
icon = lookup_icon_filename(self._notebook, "note.png")
pb = keepnote.gui.get_pixbuf(icon)
#if node is not None:
results.append((get_node_url(nodeid), title, pb))
# offer url match
if is_url(text):
results = [(text, text,
get_resource_pixbuf(u"node_icons",
u"web.png"))] + results
# ensure link picker is initialized
if self._link_picker is None:
self._link_picker = LinkPickerPopup(self._textview)
self._link_picker.connect("pick-link", self._on_pick_link)
# set results
self._link_picker.set_links(results)
# move picker to correct location
if len(results) > 0:
rect = self._textview.get_iter_location(start)
x, y = self._textview.buffer_to_window_coords(
gtk.TEXT_WINDOW_WIDGET, rect.x, rect.y)
rect = self._textview.get_iter_location(end)
_, y = self._textview.buffer_to_window_coords(
gtk.TEXT_WINDOW_WIDGET, rect.x, rect.y)
self._link_picker.move_on_parent(x, y + rect.height, y)
elif self._link_picker:
self._link_picker.set_links([])
    def _on_pick_link(self, widget, title, url):
        """Callback for when link autocomplete has chosen a link"""
# get current link
tag, start, end = self._textview.get_link()
# make new link tag
tagname = RichTextLinkTag.tag_name(url)
tag = self._textview.get_buffer().tag_table.lookup(tagname)
# remember the start iter
offset = start.get_offset()
self._textview.get_buffer().delete(start, end)
# replace link text with node title
it = self._textview.get_buffer().get_iter_at_offset(offset)
self._textview.get_buffer().place_cursor(it)
self._textview.get_buffer().insert_at_cursor(title)
# get new start and end iters
end = self._textview.get_buffer().get_insert_iter()
start = self._textview.get_buffer().get_iter_at_offset(offset)
# set link tag
self._textview.set_link(url, start, end)
# exit link mode
self._textview.get_buffer().font_handler.clear_current_tag_class(tag)
#==================================================
# Image/screenshot actions
def on_screenshot(self):
"""Take and insert a screen shot image"""
# do nothing if no page is selected
if self._page is None:
return
imgfile = ""
# Minimize window
self.emit("window-request", "minimize")
try:
imgfile = self._app.take_screenshot("keepnote")
self.emit("window-request", "restore")
# insert image
self.insert_image(imgfile, "screenshot.png")
except Exception, e:
# catch exceptions for screenshot program
self.emit("window-request", "restore")
self.emit("error",
_("The screenshot program encountered an error:\n %s")
% str(e), e)
# remove temp file
try:
if os.path.exists(imgfile):
os.remove(imgfile)
except OSError, e:
self.emit("error",
_("%s was unable to remove temp file for screenshot") %
keepnote.PROGRAM_NAME)
def on_insert_hr(self):
"""Insert horizontal rule into editor"""
if self._page is None:
return
self._textview.insert_hr()
def on_insert_image(self):
"""Displays the Insert Image Dialog"""
if self._page is None:
return
dialog = FileChooserDialog(
_("Insert Image From File"), self.get_toplevel(),
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("Insert"), gtk.RESPONSE_OK),
app=self._app,
persistent_path="insert_image_path")
# add image filters
filter = gtk.FileFilter()
filter.set_name("Images")
filter.add_mime_type("image/png")
filter.add_mime_type("image/jpeg")
filter.add_mime_type("image/gif")
filter.add_pattern("*.png")
filter.add_pattern("*.jpg")
filter.add_pattern("*.gif")
filter.add_pattern("*.tif")
filter.add_pattern("*.xpm")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
# setup preview
preview = gtk.Image()
dialog.set_preview_widget(preview)
dialog.connect("update-preview", update_file_preview, preview)
# run dialog
response = dialog.run()
if response == gtk.RESPONSE_OK:
filename = unicode_gtk(dialog.get_filename())
dialog.destroy()
if filename is None:
return
# TODO: do I need this?
imgname, ext = os.path.splitext(os.path.basename(filename))
if ext.lower() in (u".jpg", u".jpeg"):
ext = u".jpg"
else:
ext = u".png"
imgname2 = self._page.new_filename(imgname, ext=ext)
try:
self.insert_image(filename, imgname2)
except Exception, e:
# TODO: make exception more specific
self.emit("error",
_("Could not insert image '%s'") % filename, e)
else:
dialog.destroy()
def insert_image(self, filename, savename=u"image.png"):
"""Inserts an image into the text editor"""
if self._page is None:
return
print filename
print savename
img = RichTextImage()
img.set_from_pixbuf(gdk.pixbuf_new_from_file(filename))
self._textview.insert_image(img, savename)
img.show()
print img.get_filename()
#=================================================
# Image context menu
def view_image(self, image_filename):
current_page = self._page
if current_page is None:
return
image_path = os.path.join(current_page.get_path(), image_filename)
self._app.run_external_app("image_viewer", image_path)
def _on_view_image(self, menuitem):
"""View image in Image Viewer"""
# get image filename
image_filename = menuitem.get_parent().get_child().get_filename()
self.view_image(image_filename)
def _on_edit_image(self, menuitem):
"""Edit image in Image Editor"""
current_page = self._page
if current_page is None:
return
# get image filename
image_filename = menuitem.get_parent().get_child().get_filename()
image_path = os.path.join(current_page.get_path(), image_filename)
self._app.run_external_app("image_editor", image_path)
def _on_resize_image(self, menuitem):
"""Resize image"""
current_page = self._page
if current_page is None:
return
image = menuitem.get_parent().get_child()
image_resize_dialog = \
dialog_image_resize.ImageResizeDialog(self.get_toplevel(),
self._app.pref)
image_resize_dialog.on_resize(image)
def _on_new_image(self):
"""New image"""
current_page = self._page
if current_page is None:
return
dialog = dialog_image_new.NewImageDialog(self, self._app)
dialog.show()
def _on_save_image_as(self, menuitem):
"""Save image as a new file"""
current_page = self._page
if current_page is None:
return
# get image filename
image = menuitem.get_parent().get_child()
image_filename = image.get_filename()
image_path = os.path.join(current_page.get_path(), image_filename)
dialog = FileChooserDialog(
_("Save Image As..."), self.get_toplevel(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
_("Save"), gtk.RESPONSE_OK),
app=self._app,
persistent_path="save_image_path")
dialog.set_default_response(gtk.RESPONSE_OK)
response = dialog.run()
if response == gtk.RESPONSE_OK:
if not dialog.get_filename():
self.emit("error", _("Must specify a filename for the image."),
None, None)
else:
filename = unicode_gtk(dialog.get_filename())
try:
image.write(filename)
except Exception, e:
self.error(_("Could not save image '%s'.") % filename)
dialog.destroy()
def get_type(self, image_path):
# 0 is for image
# 3 is for inline equation
# 4 is for equation
# 5 is for tables
# 6 is for citation
# 7 is for LaTeX object
# 8 is for footnote
I=gdk.pixbuf_new_from_file(image_path)
tag=I.get_option("tEXt::tag")
return (tag)
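    # Note on the tEXt metadata read above (illustrative sketch): NodeIO._save_latex
    # embeds these keys with pixbuf.save(..., "png", {...}), and gdk hands them back
    # through get_option() once the PNG is reloaded, e.g.:
    #   pixbuf = gdk.pixbuf_new_from_file(image_path)
    #   pixbuf.get_option("tEXt::tag")      # e.g. "4" for an equation object
    #   pixbuf.get_option("tEXt::caption")  # caption text, or None if unset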
def _on_edit_caption(self, menuitem):
"""Edit image in Image Editor"""
current_page = self._page
if current_page is None:
return
# get image filename
image_filename = menuitem.get_parent().get_child().get_filename()
image_path = os.path.join(current_page.get_path(), image_filename)
        # Some object types do not support captions; reject them here
LaTeX = self.get_type(image_path)
if LaTeX == "3":
self.emit("error","There is no Caption possible for inline equations",None)
return
if LaTeX == "4":
self.emit("error","There is no Caption possible for equations",None)
return
elif LaTeX == "8":
self.emit("error","There is no Caption possible for Footnotes",None)
return
elif LaTeX == "7":
self.emit("error","There is no Caption possible for General LaTeX code",None)
return
elif LaTeX == "6":
self.emit("error","There is no Caption possible for Citation",None)
return
tag="1"
dialog = dialog_latex.LaTeXDialog(self,tag,image_path)
dialog.show()
def _on_edit_latex(self, menuitem):
"""Edit LaTeX in Python Prompt"""
current_page = self._page
if current_page is None:
return
# get image filename
image_filename = menuitem.get_parent().get_child().get_filename()
image_path = os.path.join(current_page.get_path(), image_filename)
        # Get the object-type tag stored in the saved image's tEXt metadata
tag = self.get_type(image_path)
if tag in ("0" ,None):
self.emit("error","There is no LateX Code for Figures",None)
return
else:
dialog = dialog_latex.LaTeXDialog(self,tag,image_path)
dialog.show()
    def _on_edit_label(self, menuitem):
        """Edit label in Python Prompt"""
current_page = self._page
if current_page is None:
return
# get image filename
image_filename = menuitem.get_parent().get_child().get_filename()
image_path = os.path.join(current_page.get_path(), image_filename)
LaTeX = self.get_type(image_path)
if LaTeX == "3":
self.emit("error","There is no Label possible for inline equations",None)
return
if LaTeX == "8":
self.emit("error","There is no Label possible for footnotes",None)
return
elif LaTeX == "7":
self.emit("error","There is no Caption possible for General LaTeX code",None)
return
elif LaTeX == "6":
self.emit("error","There is no Caption possible for Citation",None)
return
        # Tag "2" is for editing the label (it works exactly like editing the caption)
tag="2"
dialog = dialog_latex.LaTeXDialog(self,tag,image_path)
dialog.show()
def make_image_menu(self, menu):
"""image context menu"""
# TODO: convert into UIManager?
# TODO: move to EditorMenus?
# TODO: add accelerators back
menu.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
item = gtk.SeparatorMenuItem()
item.show()
menu.append(item)
# image/edit
item = gtk.MenuItem(_("_View Image..."))
item.connect("activate", self._on_view_image)
item.child.set_markup_with_mnemonic(_("<b>_View Image...</b>"))
item.show()
menu.append(item)
item = gtk.MenuItem(_("_Edit Image..."))
item.connect("activate", self._on_edit_image)
item.show()
menu.append(item)
item = gtk.MenuItem(_("_Resize Image..."))
item.connect("activate", self._on_resize_image)
item.show()
menu.append(item)
# image/save
item = gtk.ImageMenuItem(_("_Save Image As..."))
item.connect("activate", self._on_save_image_as)
item.show()
menu.append(item)
# image/edit LaTeX Code
item = gtk.ImageMenuItem(_("_Edit LaTeX Text..."))
item.connect("activate", self._on_edit_latex)
item.show()
menu.append(item)
# image/edit LaTeX Code
item = gtk.ImageMenuItem(_("_Edit Label..."))
item.connect("activate", self._on_edit_label)
item.show()
menu.append(item)
# image/edit caption
item = gtk.ImageMenuItem(_("_Edit Caption..."))
item.connect("activate", self._on_edit_caption)
item.show()
menu.append(item)
class FontUI (object):
def __init__(self, widget, signal, update_func=lambda ui, font: None,
block=None, unblock=None):
self.widget = widget
self.signal = signal
self.update_func = update_func
if block is None:
self.block = lambda: self.widget.handler_block(self.signal)
else:
self.block = block
if unblock is None:
self.unblock = lambda: self.widget.handler_unblock(self.signal)
else:
self.unblock = unblock
class EditorMenus (gobject.GObject):
def __init__(self, app, editor):
gobject.GObject.__init__(self)
self._editor = editor
self._app = app
self._action_group = None
self._uis = []
self._font_ui_signals = [] # list of font ui widgets
self.spell_check_toggle = None
self._removed_widgets = []
#=============================================================
# Update UI (menubar) from font under cursor
def on_font_change(self, editor, font):
"""Update the toolbar reflect the font under the cursor"""
# block toolbar handlers
for ui in self._font_ui_signals:
ui.block()
# call update callback
for ui in self._font_ui_signals:
ui.update_func(ui, font)
# unblock toolbar handlers
for ui in self._font_ui_signals:
ui.unblock()
#==================================================
# changing font handlers
def _on_mod(self, mod):
"""Toggle a font modification"""
self._editor.get_textview().toggle_font_mod(mod)
def _on_toggle_link(self):
"""Link mode has been toggled"""
textview = self._editor.get_textview()
textview.toggle_link()
tag, start, end = textview.get_link()
if tag is not None:
url = start.get_text(end)
if tag.get_href() == "" and is_url(url):
# set default url to link text
textview.set_link(url, start, end)
self._editor.emit("make-link")
def _on_justify(self, justify):
"""Set font justification"""
self._editor.get_textview().set_justify(justify)
#font = self._editor.get_textview().get_font()
#self.on_font_change(self._editor, font)
def _on_bullet_list(self):
"""Toggle bullet list"""
self._editor.get_textview().toggle_bullet()
#font = self._editor.get_textview().get_font()
#self.on_font_change(self._editor, font)
def _on_indent(self):
"""Indent current paragraph"""
self._editor.get_textview().indent()
def _on_unindent(self):
"""Unindent current paragraph"""
self._editor.get_textview().unindent()
def _on_family_set(self, font_family_combo):
"""Set the font family"""
self._editor.get_textview().set_font_family(
font_family_combo.get_family())
self._editor.get_textview().grab_focus()
def _on_font_size_change(self, size):
"""Set the font size"""
self._editor.get_textview().set_font_size(size)
self._editor.get_textview().grab_focus()
def _on_font_size_inc(self):
"""Increase font size"""
font = self._editor.get_textview().get_font()
font.size += 2
self._editor.get_textview().set_font_size(font.size)
#self.on_font_change(self._editor, font)
def _on_font_size_dec(self):
"""Decrease font size"""
font = self._editor.get_textview().get_font()
if font.size > 4:
font.size -= 2
self._editor.get_textview().set_font_size(font.size)
#self.on_font_change(self._editor, font)
def _on_color_set(self, kind, widget, color=0):
"""Set text/background color"""
if color == 0:
color = widget.color
if color is not None:
colorstr = color_tuple_to_string(color)
else:
colorstr = None
if kind == "fg":
self._editor.get_textview().set_font_fg_color(colorstr)
elif kind == "bg":
self._editor.get_textview().set_font_bg_color(colorstr)
else:
raise Exception("unknown color type '%s'" % str(kind))
def _on_choose_font(self):
"""Callback for opening Choose Font Dialog"""
font = self._editor.get_textview().get_font()
dialog = gtk.FontSelectionDialog(_("Choose Font"))
dialog.set_font_name("%s %d" % (font.family, font.size))
response = dialog.run()
if response == gtk.RESPONSE_OK:
self._editor.get_textview().set_font(dialog.get_font_name())
self._editor.get_textview().grab_focus()
dialog.destroy()
#=======================================================
# spellcheck
def enable_spell_check(self, enabled):
"""Spell check"""
self._editor.get_textview().enable_spell_check(enabled)
# see if spell check became enabled
enabled = self._editor.get_textview().is_spell_check_enabled()
# update UI to match
if self.spell_check_toggle:
self.spell_check_toggle.set_active(enabled)
return enabled
def on_spell_check_toggle(self, widget):
"""Toggle spell checker"""
self.enable_spell_check(widget.get_active())
#=====================================================
# toolbar and menus
def add_ui(self, window):
self._action_group = gtk.ActionGroup("Editor")
self._uis = []
add_actions(self._action_group, self.get_actions())
window.get_uimanager().insert_action_group(
self._action_group, 0)
for s in self.get_ui():
self._uis.append(window.get_uimanager().add_ui_from_string(s))
window.get_uimanager().ensure_update()
self.setup_menu(window, window.get_uimanager())
def remove_ui(self, window):
# disconnect signals
for ui in self._font_ui_signals:
ui.widget.disconnect(ui.signal)
self._font_ui_signals = []
# remove ui
for ui in reversed(self._uis):
window.get_uimanager().remove_ui(ui)
self._uis = []
#window.get_uimanager().ensure_update()
# remove action group
window.get_uimanager().remove_action_group(self._action_group)
self._action_group = None
def get_actions(self):
def BothAction(name1, *args):
return [Action(name1, *args), ToggleAction(name1 + " Tool", *args)]
return (map(lambda x: Action(*x), [
("Insert Horizontal Rule", None, _("Insert _Horizontal Rule"),
"<control>H", None,
lambda w: self._editor.on_insert_hr()),
("Insert Image", None, _("Insert _Image..."),
"", None,
lambda w: self._editor.on_insert_image()),
("Insert New Image", None, _("Insert _New Image..."),
"", _("Insert a new image"),
lambda w: self._on_new_image()),
("Insert Screenshot", None, _("Insert _Screenshot..."),
"<control>Insert", None,
lambda w: self._editor.on_screenshot()),
# finding
("Find In Page", gtk.STOCK_FIND, _("_Find In Page..."),
"<control>F", None,
lambda w: self._editor.find_dialog.on_find(False)),
("Find Next In Page", gtk.STOCK_FIND, _("Find _Next In Page..."),
"<control>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=True)),
("Find Previous In Page", gtk.STOCK_FIND,
_("Find Pre_vious In Page..."),
"<control><shift>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=False)),
("Replace In Page", gtk.STOCK_FIND_AND_REPLACE,
_("_Replace In Page..."),
"<control>R", None,
lambda w: self._editor.find_dialog.on_find(True)),
("Format", None, _("Fo_rmat")) ]) +
BothAction("Bold", gtk.STOCK_BOLD, _("_Bold"),
"<control>B", _("Bold"),
lambda w: self._on_mod("bold"),
"bold.png") +
BothAction("Italic", gtk.STOCK_ITALIC, _("_Italic"),
"<control>I", _("Italic"),
lambda w: self._on_mod("italic"),
"italic.png") +
BothAction("Underline", gtk.STOCK_UNDERLINE, _("_Underline"),
"<control>U", _("Underline"),
lambda w: self._on_mod("underline"),
"underline.png") +
BothAction("Strike", None, _("S_trike"),
"", _("Strike"),
lambda w: self._on_mod("strike"),
"strike.png") +
BothAction("Monospace", None, _("_Monospace"),
"<control>M", _("Monospace"),
lambda w: self._on_mod("tt"),
"fixed-width.png") +
BothAction("Link", None, _("Lin_k"),
"<control>L", _("Make Link"),
lambda w: self._on_toggle_link(),
"link.png") +
BothAction("No Wrapping", None, _("No _Wrapping"),
"", _("No Wrapping"),
lambda w: self._on_mod("nowrap"),
"no-wrap.png") +
BothAction("Left Align", None, _("_Left Align"),
"<shift><control>L", _("Left Align"),
lambda w: self._on_justify("left"),
"alignleft.png") +
BothAction("Center Align", None, _("C_enter Align"),
"<shift><control>E", _("Center Align"),
lambda w: self._on_justify("center"),
"aligncenter.png") +
BothAction("Right Align", None, _("_Right Align"),
"<shift><control>R", _("Right Align"),
lambda w: self._on_justify("right"),
"alignright.png") +
BothAction("Justify Align", None, _("_Justify Align"),
"<shift><control>J", _("Justify Align"),
lambda w: self._on_justify("fill"),
"alignjustify.png") +
BothAction("Bullet List", None, _("_Bullet List"),
"<control>asterisk", _("Bullet List"),
lambda w: self._on_bullet_list(),
"bullet.png") +
map(lambda x: Action(*x), [
("Font Selector Tool", None, "", "", _("Set Font Face")),
("Font Size Tool", None, "", "", _("Set Font Size")),
("Font Fg Color Tool", None, "", "", _("Set Text Color")),
("Font Bg Color Tool", None, "", "", _("Set Background Color")),
("Indent More", None, _("Indent M_ore"),
"<control>parenright", None,
lambda w: self._on_indent(),
"indent-more.png"),
("Indent Less", None, _("Indent Le_ss"),
"<control>parenleft", None,
lambda w: self._on_unindent(),
"indent-less.png"),
("Increase Font Size", None, _("Increase Font _Size"),
"<control>equal", None,
lambda w: self._on_font_size_inc()),
("Decrease Font Size", None, _("_Decrease Font Size"),
"<control>minus", None,
lambda w: self._on_font_size_dec()),
("Apply Text Color", None, _("_Apply Text Color"),
"", None,
lambda w: self._on_color_set("fg", self.fg_color_button),
"font-inc.png"),
("Apply Background Color", None, _("A_pply Background Color"),
"", None,
lambda w: self._on_color_set("bg", self.bg_color_button),
"font-dec.png"),
("Choose Font", None, _("Choose _Font"),
"<control><shift>F", None,
lambda w: self._on_choose_font(),
"font.png"),
("Go to Link", None, _("Go to Lin_k"),
"<control>space", None,
lambda w: self._editor.get_textview().click_iter()),
]) +
[ToggleAction("Spell Check", None, _("_Spell Check"),
"", None,
self.on_spell_check_toggle)]
)
def get_ui(self):
use_minitoolbar = self._app.pref.get("look_and_feel",
"use_minitoolbar",
default=False)
ui = ["""
<ui>
<menubar name="main_menu_bar">
<menu action="Edit">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Insert Horizontal Rule"/>
<menuitem action="Insert Image"/>
<!-- <menuitem action="Insert New Image"/> -->
<menuitem action="Insert Screenshot"/>
<placeholder name="Extension"/>
</placeholder>
</placeholder>
</menu>
<menu action="Search">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Find In Page"/>
<menuitem action="Find Next In Page"/>
<menuitem action="Find Previous In Page"/>
<menuitem action="Replace In Page"/>
</placeholder>
</placeholder>
</menu>
<placeholder name="Viewer">
<placeholder name="Editor">
<menu action="Format">
<menuitem action="Bold"/>
<menuitem action="Italic"/>
<menuitem action="Underline"/>
<menuitem action="Strike"/>
<menuitem action="Monospace"/>
<menuitem action="Link"/>
<menuitem action="No Wrapping"/>
<separator/>
<menuitem action="Left Align"/>
<menuitem action="Center Align"/>
<menuitem action="Right Align"/>
<menuitem action="Justify Align"/>
<menuitem action="Bullet List"/>
<menuitem action="Indent More"/>
<menuitem action="Indent Less"/>
<separator/>
<menuitem action="Increase Font Size"/>
<menuitem action="Decrease Font Size"/>
<menuitem action="Apply Text Color"/>
<menuitem action="Apply Background Color"/>
<menuitem action="Choose Font"/>
</menu>
</placeholder>
</placeholder>
<menu action="Go">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Go to Link"/>
</placeholder>
</placeholder>
</menu>
<menu action="Tools">
<placeholder name="Viewer">
<menuitem action="Spell Check"/>
</placeholder>
</menu>
</menubar>
</ui>
"""]
if use_minitoolbar:
ui.append("""
<ui>
<toolbar name="main_tool_bar">
<placeholder name="Viewer">
<placeholder name="Editor">
<toolitem action="Bold Tool"/>
<toolitem action="Italic Tool"/>
<toolitem action="Underline Tool"/>
<toolitem action="Link Tool"/>
<toolitem action="Font Selector Tool"/>
<toolitem action="Font Size Tool"/>
<toolitem action="Font Fg Color Tool"/>
<toolitem action="Font Bg Color Tool"/>
<separator/>
<toolitem action="Bullet List Tool"/>
</placeholder>
</placeholder>
</toolbar>
</ui>
""")
else:
ui.append("""
<ui>
<toolbar name="main_tool_bar">
<placeholder name="Viewer">
<placeholder name="Editor">
<toolitem action="Bold Tool"/>
<toolitem action="Italic Tool"/>
<toolitem action="Underline Tool"/>
<toolitem action="Strike Tool"/>
<toolitem action="Monospace Tool"/>
<toolitem action="Link Tool"/>
<toolitem action="No Wrapping Tool"/>
<toolitem action="Font Selector Tool"/>
<toolitem action="Font Size Tool"/>
<toolitem action="Font Fg Color Tool"/>
<toolitem action="Font Bg Color Tool"/>
<separator/>
<toolitem action="Left Align Tool"/>
<toolitem action="Center Align Tool"/>
<toolitem action="Right Align Tool"/>
<toolitem action="Justify Align Tool"/>
<toolitem action="Bullet List Tool"/>
<separator/>
</placeholder>
</placeholder>
</toolbar>
</ui>
""")
return ui
def setup_font_toggle(self, uimanager, path, stock=False,
update_func=lambda ui, font: None):
action = uimanager.get_action(path)
# NOTE: action can be none if minimal toolbar is in use.
if action:
proxies = action.get_proxies()
if len(proxies) == 0:
return None
# NOTE: sometimes get_proxies() is zero length after app options
# OK button is clicked. Don't know why this happens yet.
widget = action.get_proxies()[0]
def block():
action.handler_block(action.signal)
action.block_activate_from(widget)
def unblock():
action.handler_unblock(action.signal)
action.unblock_activate_from(widget)
ui = FontUI(action, action.signal, update_func,
block=block,
unblock=unblock)
self._font_ui_signals.append(ui)
return ui
else:
return None
def setup_menu(self, window, uimanager):
u = uimanager
def update_toggle(ui, active):
if len(ui.widget.get_proxies()) > 0:
widget = ui.widget.get_proxies()[0]
widget.set_active(active)
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Bold Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["bold"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Italic Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["italic"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Underline Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["underline"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Strike Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["strike"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Monospace Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["tt"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Link Tool",
update_func=lambda ui, font: update_toggle(ui, font.link is not None))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/No Wrapping Tool",
update_func=lambda ui, font: update_toggle(ui, font.mods["nowrap"]))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Left Align Tool",
update_func=lambda ui, font:
update_toggle(ui, font.justify == "left"))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Center Align Tool",
update_func=lambda ui, font:
update_toggle(ui, font.justify == "center"))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Right Align Tool",
update_func=lambda ui, font:
update_toggle(ui, font.justify == "right"))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Justify Align Tool",
update_func=lambda ui, font:
update_toggle(ui, font.justify == "fill"))
self.setup_font_toggle(
uimanager, "/main_tool_bar/Viewer/Editor/Bullet List Tool",
update_func=lambda ui, font:
update_toggle(ui, font.par_type == "bullet"))
#lambda ui, font:
#ui.widget.set_active(font.par_type == "bullet"))
# family combo
font_family_combo = FontSelector()
font_family_combo.set_size_request(150, 25)
# TODO: make proper custom tools
w = uimanager.get_widget("/main_tool_bar/Viewer/Editor/Font Selector Tool")
if w:
self._removed_widgets.append(w.child)
w.remove(w.child)
w.add(font_family_combo)
font_family_combo.show()
font_family_id = font_family_combo.connect("changed",
self._on_family_set)
self._font_ui_signals.append(
FontUI(font_family_combo,
font_family_id,
update_func=lambda ui, font:
ui.widget.set_family(font.family)))
# font size
DEFAULT_FONT_SIZE = 10
font_size_button = gtk.SpinButton(
gtk.Adjustment(value=DEFAULT_FONT_SIZE, lower=2, upper=500,
step_incr=1))
font_size_button.set_size_request(-1, 25)
font_size_button.set_value(DEFAULT_FONT_SIZE)
font_size_button.set_editable(False)
w = uimanager.get_widget("/main_tool_bar/Viewer/Editor/Font Size Tool")
if w:
self._removed_widgets.append(w.child)
w.remove(w.child)
w.add(font_size_button)
font_size_button.show()
w.set_homogeneous(False)
font_size_id = font_size_button.connect("value-changed",
lambda w:
self._on_font_size_change(font_size_button.get_value()))
self._font_ui_signals.append(
FontUI(font_size_button,
font_size_id,
update_func=lambda ui, font:
ui.widget.set_value(font.size)))
# font fg color
# TODO: code in proper default color
self.fg_color_button = FgColorTool(14, 15, (0, 0, 0))
self.fg_color_button.set_homogeneous(False)
self.fg_color_button.connect("set-color",
lambda w, color: self._on_color_set(
"fg", self.fg_color_button, color))
w = uimanager.get_widget("/main_tool_bar/Viewer/Editor/Font Fg Color Tool")
if w:
self._removed_widgets.append(w.child)
w.remove(w.child)
w.add(self.fg_color_button)
self.fg_color_button.show()
w.set_homogeneous(False)
# font bg color
self.bg_color_button = BgColorTool(14, 15, (65535, 65535, 65535))
self.bg_color_button.set_homogeneous(False)
self.bg_color_button.connect(
"set-color",
lambda w, color: self._on_color_set(
"bg", self.bg_color_button, color))
w = uimanager.get_widget("/main_tool_bar/Viewer/Editor/Font Bg Color Tool")
if w:
self._removed_widgets.append(w.child)
w.remove(w.child)
w.add(self.bg_color_button)
self.bg_color_button.show()
w.set_homogeneous(False)
# get spell check toggle
self.spell_check_toggle = \
uimanager.get_widget("/main_menu_bar/Tools/Viewer/Spell Check")
self.spell_check_toggle.set_sensitive(
self._editor.get_textview().can_spell_check())
self.spell_check_toggle.set_active(window.get_app().pref.get(
"editors", "general", "spell_check", default=True))
class ComboToolItem(gtk.ToolItem):
__gtype_name__ = "ComboToolItem"
def __init__(self):
gtk.ToolItem.__init__(self)
self.set_border_width(2)
self.set_homogeneous(False)
self.set_expand(False)
self.combobox = gtk.combo_box_entry_new_text()
for text in ['a', 'b', 'c', 'd', 'e', 'f']:
self.combobox.append_text(text)
self.combobox.show()
self.add(self.combobox)
def do_set_tooltip(self, tooltips, tip_text=None, tip_private=None):
gtk.ToolItem.set_tooltip(self, tooltips, tip_text, tip_private)
tooltips.set_tip(self.combobox, tip_text, tip_private)
class ComboToolAction(gtk.Action):
__gtype_name__ = "ComboToolAction"
def __init__(self, name, label, tooltip, stock_id):
gtk.Action.__init__(self, name, label, tooltip, stock_id)
ComboToolAction.set_tool_item_type(ComboToolItem)
| gpl-2.0 | -6,252,252,930,391,750,000 | 32.482319 | 90 | 0.531399 | false |
eusoubrasileiro/fatiando_seismic | fatiando/gravmag/sphere.py | 3 | 24356 | r"""
Calculate the potential fields of a homogeneous sphere.
**Magnetic**
Calculates the magnetic effect produced by an sphere. The functions are
based on Blakely (1995).
* :func:`~fatiando.gravmag.sphere.tf`: calculates the total-field anomaly
* :func:`~fatiando.gravmag.sphere.bx`: calculates the x component of the
induction
* :func:`~fatiando.gravmag.sphere.by`: calculates the y component of the
induction
* :func:`~fatiando.gravmag.sphere.bz`: calculates the z component of the
induction
Remember that:
The magnetization :math:`\mathbf{M}` and the dipole moment :math:`\mathbf{m}`
are related through the volume V:
.. math::
\mathbf{M} = \dfrac{\mathbf{m}}{V}.
The total-field anomaly is:
.. math::
\Delta T = |\mathbf{T}| - |\mathbf{F}|,
where :math:`\mathbf{T}` is the measured field and :math:`\mathbf{F}` is a
reference (regional) field. The forward modeling functions
:func:`~fatiando.gravmag.sphere.bx`, :func:`~fatiando.gravmag.sphere.by`,
and :func:`~fatiando.gravmag.sphere.bz` calculate the 3 components of the
field perturbation :math:`\Delta\mathbf{F}`
.. math::
\Delta\mathbf{F} = \mathbf{T} - \mathbf{F}.
Then the total-field anomaly caused by the sphere is
.. math::
\Delta T \approx \hat{\mathbf{F}}\cdot\Delta\mathbf{F}.
**Gravity**
Calculates the gravitational acceleration and gravity gradient tensor
components.
* :func:`~fatiando.gravmag.sphere.gz`
* :func:`~fatiando.gravmag.sphere.gxx`
* :func:`~fatiando.gravmag.sphere.gxy`
* :func:`~fatiando.gravmag.sphere.gxz`
* :func:`~fatiando.gravmag.sphere.gyy`
* :func:`~fatiando.gravmag.sphere.gyz`
* :func:`~fatiando.gravmag.sphere.gzz`
**Auxiliary Functions**
Calculates the second derivatives of the function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
with respect to the variables :math:`x`, :math:`y`, and :math:`z`. In
this equation,
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2},
and :math:`R` is the radius of a sphere with centre at the Cartesian
coordinates :math:`\nu`, :math:`\eta` and :math:`\zeta`.
These second derivatives are used to calculate the total field magnetic anomaly
and the gravity gradient tensor components.
* :func:`~fatiando.gravmag.sphere.kernelxx`
* :func:`~fatiando.gravmag.sphere.kernelxy`
* :func:`~fatiando.gravmag.sphere.kernelxz`
* :func:`~fatiando.gravmag.sphere.kernelyy`
* :func:`~fatiando.gravmag.sphere.kernelyz`
* :func:`~fatiando.gravmag.sphere.kernelzz`
**References**
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic Applications,
Cambridge University Press.
----
"""
from __future__ import division
import numpy
from ..constants import SI2MGAL, G, CM, T2NT, SI2EOTVOS
from .. import utils
try:
from . import _sphere
except ImportError:
_sphere = None
def tf(xp, yp, zp, spheres, inc, dec, pmag=None):
"""
Calculate the total-field anomaly of spheres.
.. note:: Input units are SI. Output is in nT
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres without ``'magnetization'`` will be
ignored.
* inc : float
The inclination of the regional field (in degrees)
* dec : float
The declination of the regional field (in degrees)
* pmag : [mx, my, mz] or None
A magnetization vector. If not None, will use this value instead of the
``'magnetization'`` property of the spheres. Use this, e.g., for
sensitivity matrix building.
Returns:
* tf : array
The total-field anomaly
"""
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = numpy.zeros(size, dtype=numpy.float)
# Calculate the 3 components of the unit vector in the direction of the
# regional field
fx, fy, fz = utils.dircos(inc, dec)
if pmag is not None:
if isinstance(pmag, float) or isinstance(pmag, int):
pmx, pmy, pmz = pmag * fx, pmag * fy, pmag * fz
else:
pmx, pmy, pmz = pmag
for sphere in spheres:
if sphere is None or ('magnetization' not in sphere.props
and pmag is None):
continue
# Get the intensity and unit vector from the magnetization
if pmag is None:
mag = sphere.props['magnetization']
if isinstance(mag, float) or isinstance(mag, int):
mx, my, mz = mag * fx, mag * fy, mag * fz
else:
mx, my, mz = mag
else:
mx, my, mz = pmx, pmy, pmz
_sphere.tf(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
mx, my, mz, fx, fy, fz, res)
res *= CM * T2NT
return res
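# Usage sketch (illustrative); the mesher/gridder/utils helpers and their
# signatures are assumed from the fatiando package layout:
#   from fatiando import mesher, gridder, utils
#   model = [mesher.Sphere(0, 0, 1000, 500,
#                          {'magnetization': utils.ang2vec(2, 25, -10)})]
#   xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), (50, 50), z=-100)
#   anomaly_nT = tf(xp, yp, zp, model, inc=25, dec=-10)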
def bx(xp, yp, zp, spheres):
"""
Calculates the x component of the magnetic induction produced by spheres.
.. note:: Input units are SI. Output is in nT
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres without ``'magnetization'`` will be
ignored. The ``'magnetization'`` must be a vector.
Returns:
* bx: array
The x component of the magnetic induction
"""
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = numpy.zeros(size, dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('magnetization' not in sphere.props):
continue
# Get the magnetization vector components
mx, my, mz = sphere.props['magnetization']
_sphere.bx(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
mx, my, mz, res)
res *= CM * T2NT
return res
def by(xp, yp, zp, spheres):
"""
Calculates the y component of the magnetic induction produced by spheres.
.. note:: Input units are SI. Output is in nT
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres without ``'magnetization'`` will be
ignored. The ``'magnetization'`` must be a vector.
Returns:
* by: array
The y component of the magnetic induction
"""
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = numpy.zeros(size, dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('magnetization' not in sphere.props):
continue
# Get the magnetization vector components
mx, my, mz = sphere.props['magnetization']
_sphere.by(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
mx, my, mz, res)
res *= CM * T2NT
return res
def bz(xp, yp, zp, spheres):
"""
Calculates the z component of the magnetic induction produced by spheres.
.. note:: Input units are SI. Output is in nT
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres without ``'magnetization'`` will be
ignored. The ``'magnetization'`` must be a vector.
Returns:
* bz: array
The z component of the magnetic induction
"""
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
size = len(xp)
res = numpy.zeros(size, dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('magnetization' not in sphere.props):
continue
# Get the magnetization vector components
mx, my, mz = sphere.props['magnetization']
_sphere.bz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
mx, my, mz, res)
res *= CM * T2NT
return res
def gz(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_z` gravity acceleration component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in mGal!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
if xp.shape != yp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2MGAL
return res
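# Usage sketch (illustrative), reusing the assumed helpers from the tf() example:
#   model = [mesher.Sphere(0, 0, 1000, 500, {'density': 1000.})]
#   gz_mGal = gz(xp, yp, zp, model)   # xp, yp, zp from gridder.regular(...)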
def gxx(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{xx}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gxx(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
def gxy(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{xy}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gxy(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
def gxz(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{xz}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gxz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
def gyy(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{yy}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gyy(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
def gyz(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{yz}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gyz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
def gzz(xp, yp, zp, spheres, dens=None):
"""
Calculates the :math:`g_{zz}` gravity gradient component.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input values in SI and output in Eotvos!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. Those
without will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
for sphere in spheres:
if sphere is None or ('density' not in sphere.props and dens is None):
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
_sphere.gzz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
density, res)
res *= G * SI2EOTVOS
return res
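# --- Illustrative sketch (editor addition): assembling all six independent
# gravity gradient components computed above into one list (values in Eotvos).
def _example_tensor_usage(xp, yp, zp, model):
    return [comp(xp, yp, zp, model)
            for comp in (gxx, gxy, gxz, gyy, gyz, gzz)]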
def kernelxx(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial x^2},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gxx(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
def kernelxy(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial x \partial y},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gxy(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
def kernelxz(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial x \partial z},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gxz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
def kernelyy(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial y^2},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gyy(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
def kernelyz(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial y \partial z},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gyz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
def kernelzz(xp, yp, zp, sphere):
r"""
Calculates the function
.. math::
\frac{\partial^2 \phi(x,y,z)}{\partial z^2},
where
.. math::
\phi(x,y,z) = \frac{4}{3} \pi R^3 \frac{1}{r}
and
.. math::
r = \sqrt{(x - \nu)^2 + (y - \eta)^2 + (z - \zeta)^2}.
.. note:: The coordinate system of the input parameters is to be
x -> North, y -> East and z -> Down.
.. note:: All input and output values in SI!
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : object of :class:`fatiando.mesher.Sphere`
Returns:
* res : array
The function calculated on xp, yp, zp
"""
    if xp.shape != yp.shape or xp.shape != zp.shape:
raise ValueError("Input arrays xp, yp, and zp must have same shape!")
res = numpy.zeros(len(xp), dtype=numpy.float)
_sphere.gzz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius, 1,
res)
return res
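# --- Illustrative sketch (editor addition): using the unit-density kernels
# above to build a gzz sensitivity (Jacobian) matrix, one column per sphere.
# The G * SI2EOTVOS factor converts the kernel to Eotvos per unit density,
# matching the scaling applied in gzz above.
def _example_gzz_sensitivity(xp, yp, zp, spheres):
    import numpy
    jac = numpy.empty((len(xp), len(spheres)), dtype=numpy.float)
    for j, s in enumerate(spheres):
        jac[:, j] = G * SI2EOTVOS * kernelzz(xp, yp, zp, s)
    return jac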
| bsd-3-clause | 4,972,256,355,419,756,000 | 27.789598 | 79 | 0.597553 | false |
sitepod/kubernetes | cluster/juju/return-node-ips.py | 310 | 1024 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
# This script helps parse out the private IP addresses from the
# `juju run` command's JSON object, see cluster/juju/util.sh
if len(sys.argv) > 1:
# It takes the JSON output as the first argument.
nodes = json.loads(sys.argv[1])
    # There can be multiple nodes; print the Stdout from each.
for num in nodes:
print num['Stdout'].rstrip()
else:
exit(1)
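# Example (editor addition, illustrative): given
#   python return-node-ips.py '[{"Stdout": "10.0.0.4\n"}, {"Stdout": "10.0.0.5\n"}]'
# the script prints one private IP per line:
#   10.0.0.4
#   10.0.0.5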
| apache-2.0 | 7,708,610,732,656,663,000 | 34.310345 | 74 | 0.732422 | false |
mlaitinen/odoo | addons/project_issue/report/project_issue_report.py | 303 | 4652 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
from openerp.addons.crm import crm
class project_issue_report(osv.osv):
_name = "project.issue.report"
_auto = False
_columns = {
'section_id':fields.many2one('crm.case.section', 'Sale Team', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.datetime('Date of Opening', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True),
'date_closed': fields.datetime('Date of Closing', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'stage_id': fields.many2one('project.task.type', 'Stage'),
'nbr': fields.integer('# of Issues', readonly=True), # TDE FIXME master: rename into nbr_issues
'working_hours_open': fields.float('Avg. Working Hours to Open', readonly=True, group_operator="avg"),
'working_hours_close': fields.float('Avg. Working Hours to Close', readonly=True, group_operator="avg"),
'delay_open': fields.float('Avg. Delay to Open', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to open the project issue."),
'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the project issue"),
'company_id' : fields.many2one('res.company', 'Company'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'project_id':fields.many2one('project.project', 'Project',readonly=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'user_id' : fields.many2one('res.users', 'Assigned to',readonly=True),
'partner_id': fields.many2one('res.partner','Contact'),
'channel': fields.char('Channel', readonly=True, help="Communication Channel."),
'task_id': fields.many2one('project.task', 'Task'),
'email': fields.integer('# Emails', size=128, readonly=True),
'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_issue_report')
cr.execute("""
CREATE OR REPLACE VIEW project_issue_report AS (
SELECT
c.id as id,
c.date_open as opening_date,
c.create_date as create_date,
c.date_last_stage_update as date_last_stage_update,
c.user_id,
c.working_hours_open,
c.working_hours_close,
c.section_id,
c.stage_id,
date(c.date_closed) as date_closed,
c.company_id as company_id,
c.priority as priority,
c.project_id as project_id,
c.version_id as version_id,
1 as nbr,
c.partner_id,
c.channel,
c.task_id,
c.day_open as delay_open,
c.day_close as delay_close,
(SELECT count(id) FROM mail_message WHERE model='project.issue' AND res_id=c.id) AS email,
t.reviewer_id
FROM
project_issue c
LEFT JOIN project_task t on c.task_id = t.id
WHERE c.active= 'true'
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
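# Illustrative read of the generated view through the ORM (editor addition;
# assumes the classic OpenERP 7-style API used elsewhere in this module, and
# a hypothetical project_id variable):
# report_obj = self.pool.get('project.issue.report')
# ids = report_obj.search(cr, uid, [('project_id', '=', project_id)])
# rows = report_obj.read(cr, uid, ids, ['nbr', 'delay_open', 'delay_close'])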
| agpl-3.0 | -296,643,432,376,574,340 | 49.021505 | 112 | 0.567498 | false |
SnakeJenny/TensorFlow | tensorflow/contrib/graph_editor/transform.py | 46 | 24592 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to transform an subgraph into another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from functools import partial
from six import iteritems
from six import iterkeys
from six import string_types
from six import StringIO
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"replace_t_with_placeholder_handler",
"keep_t_if_possible_handler",
"assign_renamed_collections_handler",
"transform_op_if_inside_handler",
"copy_op_handler",
"Transformer",
"TransformerInfo",
"copy",
"copy_with_input_replacements",
"graph_replace",
]
def replace_t_with_placeholder_handler(info, t):
"""Transform a tensor into a placeholder tensor.
This handler is typically used to transform a subgraph input tensor into a
placeholder.
Args:
info: Transform._TmpInfo instance.
t: tensor whose input must be transformed into a place holder.
Returns:
The tensor generated by the newly created place holder.
"""
with info.graph_.as_default():
t_ = util.make_placeholder_from_tensor(t, scope=info.scope_)
return t_
def keep_t_if_possible_handler(info, t):
"""Transform a tensor into itself (identity) if possible.
  This handler transforms a tensor into itself if the source and destination
  graphs are the same. Otherwise it will create a placeholder.
  This handler is typically used to transform hidden input tensors.
Args:
info: Transform._TmpInfo instance.
t: tensor whose input must be transformed into a place holder.
Returns:
The tensor generated by the newly created place holder.
"""
if info.graph is info.graph_:
return t
else:
return replace_t_with_placeholder_handler(info, t)
def assign_renamed_collections_handler(info, elem, elem_):
"""Add the transformed elem to the (renamed) collections of elem.
  A collection is renamed only if it is not a known key, as described in
`tf.GraphKeys`.
Args:
info: Transform._TmpInfo instance.
elem: the original element (`tf.Tensor` or `tf.Operation`)
elem_: the transformed element
"""
known_collection_names = util.get_predefined_collection_names()
for name, collection in iteritems(info.collections):
if elem not in collection:
continue
if name in known_collection_names:
transformed_name = name
else:
transformed_name = info.new_name(name)
info.graph_.add_to_collection(transformed_name, elem_)
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
"""Transform an optional op only if it is inside the subgraph.
  This handler is typically used to handle original ops: it is fine to keep them
if they are inside the subgraph, otherwise they are just ignored.
Args:
info: Transform._TmpInfo instance.
op: the optional op to transform (or ignore).
keep_if_possible: re-attach to the original op if possible, that is,
if the source graph and the destination graph are the same.
Returns:
The transformed op or None.
"""
if op in info.sgv.ops:
return info.transformed_ops[op]
else:
if keep_if_possible and info.graph is info.graph_:
return op
else:
return None
def copy_op_handler(info, op, copy_shape=True):
"""Copy a `tf.Operation`.
Args:
info: Transform._TmpInfo instance.
op: the `tf.Operation` to be copied.
copy_shape: also copy the shape of the tensor
Returns:
A `(op, op_outputs)` tuple containing the transformed op and its outputs.
"""
# pylint: disable=protected-access
# Clone the node def:
node_def_ = deepcopy(op._node_def)
# Transform name:
name_ = info.new_name(op.name)
name_ = info.graph_.unique_name(name_)
node_def_.name = name_
# Copy the other inputs needed for initialization
output_types_ = op._output_types[:]
input_types_ = op._input_types[:]
# Make a copy of the op_def too.
  # It's unique to every _type_ of Operation.
op_def_ = deepcopy(op._op_def)
# Initialize a new Operation instance
op_ = tf_ops.Operation(node_def_, info.graph_, [], output_types_,
[], input_types_, None, op_def_)
# copy the shape over
if copy_shape:
for t, t_ in zip(op.outputs, op_.outputs):
t_.set_shape(t.get_shape())
# Finalize original op.
if op._original_op:
original_op = info.transform_original_op_handler(info, op._original_op)
if original_op is None:
logging.debug("Could not find original op of: %s", op_.name)
else:
op_._original_op = original_op
# Add op to the graph
info.graph_._add_op(op_)
return op_, op_.outputs
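# --- Illustrative sketch (editor addition): a custom op handler built on top
# of copy_op_handler, here simply disabling shape copying. A caller can assign
# it to Transformer.transform_op_handler (defined below).
def _example_copy_without_shape(info, op):
  return copy_op_handler(info, op, copy_shape=False)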
class TransformerInfo(object):
""""Contains information about the result of a transform operation."""
def __init__(self, info):
"""Constructor.
Args:
info: an instance of Transformer._TmpInfo containing various internal
information about the transform operation.
"""
self._graph = info.graph
self._scope = info.scope
self._graph_ = info.graph_
self._scope_ = info.scope_
self._transformed_ops = info.transformed_ops
self._transformed_ts = info.transformed_ts
def _get_transformed_map(self, top):
"""Return the correct container depending on the type of `top`."""
if isinstance(top, tf_ops.Operation):
return self._transformed_ops
elif isinstance(top, tf_ops.Tensor):
return self._transformed_ts
else:
raise TypeError(
"Expected a tf.Tensor or a tf.Operation, got a {}".format(
type(top)))
def _transformed_elem(self, original_top, missing_fn=None):
"""Return the transformed op/tensor corresponding to the original one.
Args:
original_top: the original tensor/operation.
missing_fn: function handling the case where the counterpart
cannot be found. By default, None is returned.
Returns:
the transformed tensor/operation (or None if no match is found).
"""
transformed_map = self._get_transformed_map(original_top)
if isinstance(original_top, string_types):
for original, transformed in iteritems(transformed_map):
if original.name == original_top:
return transformed
return None if missing_fn is None else missing_fn(original_top)
else:
if original_top not in transformed_map:
return None if missing_fn is None else missing_fn(original_top)
return transformed_map[original_top]
def _original_elem(self, transformed_top, missing_fn=None):
"""Return the original op/tensor corresponding to the transformed one.
Args:
transformed_top: the transformed tensor/operation.
missing_fn: function handling the case where the counterpart
cannot be found. By default, None is returned.
Returns:
the original tensor/operation (or None if no match is found).
"""
transformed_map = self._get_transformed_map(transformed_top)
if isinstance(transformed_top, string_types):
finder = lambda transformed: transformed.name == transformed_top
else:
finder = lambda transformed: transformed == transformed_top
for original, transformed in iteritems(transformed_map):
if finder(transformed):
return original
return None if missing_fn is None else missing_fn(transformed_top)
def transformed(self, original, missing_fn=None):
"""Return the transformed op/tensor corresponding to the original one.
Note that the output of this function mimics the hierarchy
of its input argument `original`.
Given an iterable, it returns a list. Given an operation or a tensor,
it will return an operation or a tensor.
Args:
original: the original tensor/operation.
missing_fn: function handling the case where the counterpart
cannot be found. By default, None is returned.
Returns:
the transformed tensor/operation (or None if no match is found).
"""
transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
return util.transform_tree(original, transformed_elem)
def original(self, transformed, missing_fn=None):
"""Return the original op/tensor corresponding to the transformed one.
Note that the output of this function mimics the hierarchy
of its input argument `transformed`.
Given an iterable, it returns a list. Given an operation or a tensor,
it will return an operation or a tensor.
Args:
transformed: the transformed tensor/operation.
missing_fn: function handling the case where the counterpart
cannot be found. By default, None is returned.
Returns:
the original tensor/operation (or None if no match is found).
"""
original_elem = partial(self._original_elem, missing_fn=missing_fn)
return util.transform_tree(transformed, original_elem)
def __str__(self):
res = StringIO()
print("Transform result info:", file=res)
if self._graph == self._graph_:
in_place_str = "" if self._scope_ else " IN-PLACE"
print(" Within graph[{}]{}".format(
id(self._graph), in_place_str), file=res)
else:
print(" graph[{}] => graph[{}]".format(
id(self._graph), id(self._graph_)), file=res)
if self._scope:
print(" Relative to source scope: {}".format(self._scope), file=res)
if self._scope_:
print(" Scope destination: {}".format(self._scope_), file=res)
print("Operations mapping:", file=res)
for op, op_ in iteritems(self._transformed_ops):
print(" {} => {}".format(op.name, op_.name), file=res)
return res.getvalue()
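# --- Illustrative sketch (editor addition): typical lookups on the
# TransformerInfo returned by a transform. `info` is such an instance and
# `t` is an original tf.Tensor that was part of the transformed subgraph.
def _example_info_lookup(info, t):
  t_ = info.transformed(t)       # original -> transformed (None if unknown)
  original = info.original(t_)   # transformed -> original
  return t_, original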
class _TmpInfo(object):
"""Transformer temporary data.
An instance of this class holds all the information relevant to a call
to a transformer instance (that is, a call to __call__). An instance
is created for the life-time of the __call__ function and is passed as
argument to the handlers.
"""
def __init__(self, sgv, dst_graph, dst_scope, src_scope):
self.sgv = sgv
self.sgv_inputs_set = frozenset(sgv.inputs)
self.ops = frozenset(sgv.ops)
self.control_outputs = util.ControlOutputs(sgv.graph)
self.graph = sgv.graph
self.scope = src_scope
self.graph_ = dst_graph
self.scope_ = dst_scope
self.transformed_ops = {}
self.transformed_ts = {}
self.collections = dict((key, self.graph.get_collection(key))
for key in self.graph.get_all_collection_keys())
self.cyclic_ops = []
self.transform_original_op_handler = transform_op_if_inside_handler
def new_name(self, name):
"""Compute a destination name from a source name.
Args:
name: the name to be "transformed".
Returns:
The transformed name.
Raises:
ValueError: if the source scope is used (that is, not an empty string)
and the source name does not belong to the source scope.
"""
scope = self.scope
if not name.startswith(scope):
raise ValueError("{} does not belong to source scope: {}.".format(
name, scope))
rel_name = name[len(scope):]
name_ = self.scope_ + rel_name
return name_
class Transformer(object):
"""Transform a subgraph into another one.
  By default, the constructor creates a transform which copies a subgraph and
replaces inputs with placeholders. This behavior can be modified by changing
the handlers.
"""
def __init__(self):
"""Transformer constructor.
The following members can be modified:
transform_op_handler: handle the transformation of a `tf.Operation`.
This handler defaults to a simple copy.
assign_collections_handler: handle the assignment of collections.
This handler defaults to assigning new collections created under the
given name-scope.
transform_external_input_handler: handle the transform of the inputs to
the given subgraph. This handler defaults to creating placeholders
instead of the ops just before the input tensors of the subgraph.
transform_external_hidden_input_handler: handle the transform of the
hidden inputs of the subgraph, that is, the inputs which are not listed
      in sgv.inputs. This handler defaults to a transform which keeps the same
      input if the source and destination graphs are the same, otherwise
      it uses placeholders.
transform_original_op_handler: handle the transform of original_op. This
handler defaults to transforming original_op only if they are in the
subgraph, otherwise they are ignored.
"""
# handlers
self.transform_op_handler = copy_op_handler
self.transform_control_input_handler = transform_op_if_inside_handler
self.assign_collections_handler = assign_renamed_collections_handler
self.transform_external_input_handler = replace_t_with_placeholder_handler
self.transform_external_hidden_input_handler = keep_t_if_possible_handler
self.transform_original_op_handler = transform_op_if_inside_handler
def __call__(self,
sgv,
dst_graph,
dst_scope,
src_scope="",
reuse_dst_scope=False):
"""Execute the transformation.
Args:
sgv: the source subgraph-view.
dst_graph: the destination graph.
dst_scope: the destination scope.
      src_scope: the source scope, which specifies the path from which the
        relative path of the transformed nodes is computed. For instance, if
        src_scope is a/ and dst_scope is b/, then the node a/x/y will have a
relative path of x/y and will be transformed into b/x/y.
reuse_dst_scope: if True the dst_scope is re-used if it already exists.
Otherwise, the scope is given a unique name based on the one given
by appending an underscore followed by a digit (default).
Returns:
A tuple `(sgv, info)` where:
`sgv` is the transformed subgraph view;
`info` is an instance of TransformerInfo containing
information about the transform, including mapping between
original and transformed tensors and operations.
Raises:
ValueError: if the arguments are invalid.
"""
sgv = subgraph.make_view(sgv)
if not isinstance(dst_graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
src_scope = util.scope_finalize(src_scope)
dst_scope = util.scope_finalize(dst_scope)
# Potentially create new scope if reuse_dst_scope is False
if dst_scope and not reuse_dst_scope:
dst_scope = util.scope_finalize(dst_graph.unique_name(dst_scope[:-1]))
# Create temporary info used during this transform call
info = _TmpInfo(sgv, dst_graph, dst_scope, src_scope)
info.transform_original_op_handler = self.transform_original_op_handler
self._copy_ops(info)
self._connect_ops(info)
# Compute information about the transformation
res_info = TransformerInfo(info)
sgv_ = self._transform_sgv(info, sgv)
return sgv_, res_info
def _copy_ops(self, info):
"""Copy ops without connecting them."""
for op in info.sgv.ops:
logging.debug("Copying op: %s", op.name)
# TODO(fkp): return a subgraph?
op_, op_outputs_ = self.transform_op_handler(info, op)
if op is op_:
raise ValueError("In-place tranformation not allowed.")
# Process op.
info.transformed_ops[op] = op_
self.assign_collections_handler(info, op, op_)
# Process output tensors.
for op_output, op_output_ in zip(op.outputs, op_outputs_):
info.transformed_ts[op_output] = op_output_
self.assign_collections_handler(info, op_output, op_output_)
def _connect_ops(self, info):
"""Connect the previously copied ops."""
for op in info.sgv.ops:
logging.debug("Finalizing op: %s", op.name)
op_ = info.transformed_ops[op]
# pylint: disable=protected-access
if op_.inputs:
raise ValueError("The newly transformed op should not have "
"any inputs yet: {}".format(op_.name))
inputs_ = [self._transformed_t(info, t) for t in op.inputs]
for t in inputs_:
op_._add_input(t)
# Finalize control inputs:
control_inputs_ = [self.transform_control_input_handler(info, ci)
for ci in op.control_inputs]
control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
reroute.add_control_inputs(op_, control_inputs_)
def _transform_sgv(self, info, sgv):
"""Transform a subgraph view.
For convenience, a transform operation returns a subgraph view of the
transformed graph.
Args:
      info: Temporary information for this transform call.
sgv: the subgraph to be transformed.
Returns:
The transformed subgraph.
"""
ops_ = [op_ for _, op_ in iteritems(info.transformed_ops)]
sgv_ = subgraph.SubGraphView(ops_)
sgv_inputs_ = sgv_.inputs
sgv_outputs_ = sgv_.outputs
# re-order inputs
input_map_ = []
for input_t in sgv.inputs:
if input_t not in info.transformed_ts:
continue
input_t_ = info.transformed_ts[input_t]
if input_t_ not in sgv_inputs_:
continue
input_t_index_ = sgv_.input_index(input_t_)
input_map_.append(input_t_index_)
# re-order outputs
output_map_ = []
for output_t in sgv.outputs:
if output_t not in info.transformed_ts:
continue
output_t_ = info.transformed_ts[output_t]
if output_t_ not in sgv_outputs_:
continue
output_t_index_ = sgv_.output_index(output_t_)
output_map_.append(output_t_index_)
return sgv_.remap(input_map_, output_map_)
def _transformed_t(self, info, t):
"""Return tre transformed tensor of `t`."""
if t not in info.transformed_ts:
# If op is not in the subgraph.
if t in info.sgv_inputs_set:
# t is an input of the subgraph.
return self.transform_external_input_handler(info, t)
else:
# t is a hidden input of the subgraph.
return self.transform_external_hidden_input_handler(info, t)
else:
# If op is in the subgraph, just return its transformed.
return info.transformed_ts[t]
def copy(sgv, dst_graph=None, dst_scope="", src_scope="",
reuse_dst_scope=False):
"""Copy a subgraph.
Args:
sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
dst_graph: the destination graph.
dst_scope: the destination scope.
src_scope: the source scope.
reuse_dst_scope: if True the dst_scope is re-used if it already exists.
Otherwise, the scope is given a unique name based on the one given
by appending an underscore followed by a digit (default).
Returns:
A tuple `(sgv, info)` where:
`sgv` is the transformed subgraph view;
`info` is an instance of TransformerInfo containing
information about the transform, including mapping between
original and transformed tensors and operations.
Raises:
TypeError: if `dst_graph` is not a `tf.Graph`.
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules than the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
if dst_graph is None:
dst_graph = sgv.graph
if not isinstance(dst_graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
copier = Transformer()
return copier(
sgv, dst_graph, dst_scope, src_scope, reuse_dst_scope=reuse_dst_scope)
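# --- Illustrative usage sketch (editor addition): duplicating a set of ops
# into the same graph under a new scope. `ops_to_copy` stands for whatever the
# caller selected (ops, tensors, or an existing SubGraphView).
def _example_copy_usage(ops_to_copy):
  sgv_, info = copy(ops_to_copy, dst_scope="copied")
  return sgv_, info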
def copy_with_input_replacements(sgv, replacement_ts,
dst_graph=None, dst_scope="", src_scope="",
reuse_dst_scope=False):
"""Copy a subgraph, replacing some of its inputs.
Note a replacement only happens if the tensor to be replaced
is an input of the given subgraph. The inputs of a subgraph can
be queried using sgv.inputs.
Args:
sgv: the source subgraph-view. This argument is converted to a subgraph
using the same rules as the function subgraph.make_view.
replacement_ts: dictionary mapping from original tensors to the
replaced one.
dst_graph: the destination graph.
dst_scope: the destination scope.
src_scope: the source scope.
reuse_dst_scope: if True the dst_scope is re-used if it already exists.
Otherwise, the scope is given a unique name based on the one given
by appending an underscore followed by a digit (default).
Returns:
A tuple `(sgv, info)` where:
`sgv` is the transformed subgraph view;
`info` is an instance of TransformerInfo containing
information about the transform, including mapping between
original and transformed tensors and operations.
Raises:
TypeError: if dst_graph is not a tf.Graph.
StandardError: if sgv cannot be converted to a SubGraphView using
the same rules as the function subgraph.make_view.
"""
sgv = subgraph.make_view(sgv)
if dst_graph is None:
dst_graph = sgv.graph
if not isinstance(dst_graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
copier = Transformer()
# Replace tensor if possible.
def replace_t_with_replacement_handler(info, t):
if t in replacement_ts:
return replacement_ts[t]
else:
return keep_t_if_possible_handler(info, t)
copier.transform_external_input_handler = replace_t_with_replacement_handler
return copier(
sgv, dst_graph, dst_scope, src_scope, reuse_dst_scope=reuse_dst_scope)
def graph_replace(target_ts, replacement_ts, dst_scope="",
src_scope="", reuse_dst_scope=False):
"""Create a new graph which compute the targets from the replaced Tensors.
Args:
target_ts: a single tf.Tensor or an iterable of tf.Tensor.
replacement_ts: dictionary mapping from original tensors to replaced tensors
dst_scope: the destination scope.
src_scope: the source scope.
reuse_dst_scope: if True the dst_scope is re-used if it already exists.
Otherwise, the scope is given a unique name based on the one given
by appending an underscore followed by a digit (default).
Returns:
A single tf.Tensor or a list of target tf.Tensor, depending on
the type of the input argument `target_ts`.
The returned tensors are recomputed using the tensors from replacement_ts.
Raises:
ValueError: if the targets are not connected to replacement_ts.
"""
# Identify operations in the graph that will change.
# Start forward walk at Tensors that will be replaced, and
# backward walk at the target output Tensors.
flatten_target_ts = util.flatten_tree(target_ts)
# Construct the forward control dependencies edges so that
# the get_walks_intersection_ops can also traverse the
# control dependencies.
graph = util.get_unique_graph(flatten_target_ts, check_types=(tf_ops.Tensor))
control_ios = util.ControlOutputs(graph)
ops = select.get_walks_intersection_ops(list(iterkeys(replacement_ts)),
flatten_target_ts,
control_ios=control_ios)
if not ops:
raise ValueError("Targets and replacements are not connected!")
# Create a copy of the relevant subgraph
_, info = copy_with_input_replacements(
ops, replacement_ts, None, dst_scope, src_scope, reuse_dst_scope)
# Return the transformed targets but keep the original if the transformed
# counterpart cannot be found
missing_fn = lambda original_t: original_t
return info.transformed(target_ts, missing_fn)
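# --- Illustrative usage sketch (editor addition): recomputing a loss tensor
# with one of its inputs swapped. `loss`, `x` and `new_x` are hypothetical
# tensors living in the same (default) graph.
def _example_graph_replace_usage(loss, x, new_x):
  return graph_replace(loss, {x: new_x})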
| apache-2.0 | -6,267,076,126,941,165,000 | 36.317147 | 80 | 0.681685 | false |