repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes) |
---|---|---|---|---|---|
avanov/django | tests/generic_views/urls.py | 194 | 14571 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models, views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/login_required/$',
login_required(TemplateView.as_view())),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>[0-9]+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>[0-9]+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>[0-9]+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/bypkignoreslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bypkandslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view(query_pk_and_slug=True)),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>[0-9]+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>[0-9]+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>[0-9]+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>[0-9]+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>[0-9]+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/authors/create/interpolate_redirect_nonascii/$',
views.NaiveAuthorCreate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^[eé]dit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect_nonascii/$',
views.NaiveAuthorUpdate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^[eé]dit/author/(?P<pk>[0-9]+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect_nonascii/$',
views.NaiveAuthorDelete.as_view(success_url='/%C3%A9dit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
url(r'^dates/books/sortedbyname/$',
views.BookArchive.as_view(ordering='name')),
url(r'^dates/books/sortedbynamedec/$',
views.BookArchive.as_view(ordering='-name')),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>[0-9]+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
url(r'^list/books/sorted/$',
views.BookList.as_view(ordering='name')),
url(r'^list/books/sortedbypagesandnamedec/$',
views.BookList.as_view(ordering=('pages', '-name'))),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>[0-9]{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/sortedbyname/$',
views.BookYearArchive.as_view(make_object_list=True, ordering='name')),
url(r'^dates/books/(?P<year>\d{4})/sortedbypageandnamedec/$',
views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name'))),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/get_object_custom_queryset/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', auth_views.login)
]
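# Usage sketch (not part of the original test suite): with this module set as
# the active ROOT_URLCONF, the named patterns above can be reversed, e.g.
#
#   from django.core.urlresolvers import reverse
#   reverse('artist_detail', kwargs={'pk': 1})   # -> '/detail/artist/1/'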
| bsd-3-clause |
kyrias/cjdns | node_build/dependencies/libuv/build/gyp/test/escaping/gyptest-colon.py | 216 | 1428 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests that filenames that contain colons are handled correctly.
(This is important for absolute paths on Windows.)
"""
import os
import sys
import TestGyp
# TODO: Make colons in filenames work with make, if required.
test = TestGyp.TestGyp(formats=['!make', '!android'])
CHDIR = 'colon'
source_name = 'colon/a:b.c'
copies_name = 'colon/a:b.c-d'
if sys.platform == 'win32':
# Windows uses : as drive separator and doesn't allow it in regular filenames.
# Use abspath() to create a path that contains a colon instead.
abs_source = os.path.abspath('colon/file.c')
test.write('colon/test.gyp',
test.read('colon/test.gyp').replace("'a:b.c'", repr(abs_source)))
source_name = abs_source
abs_copies = os.path.abspath('colon/file.txt')
test.write('colon/test.gyp',
test.read('colon/test.gyp').replace("'a:b.c-d'", repr(abs_copies)))
copies_name = abs_copies
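# At this point on win32 both "'a:b.c'" and "'a:b.c-d'" in test.gyp have been
# replaced with repr() of an absolute path (something like
# "'C:\\...\\colon\\file.c'"), so the only remaining colon is the drive
# separator, which the generated build must tolerate.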
# Create the file dynamically, Windows is unhappy if a file with a colon in
# its name is checked in.
test.write(source_name, 'int main() {}')
test.write(copies_name, 'foo')
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
test.built_file_must_exist(os.path.basename(copies_name), chdir=CHDIR)
test.pass_test()
| gpl-3.0 |
paulsoh/moxie | moxie/social/strategies/django_strategy.py | 52 | 5052 | from django.conf import settings
from django.http import HttpResponse
from django.db.models import Model
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import authenticate
from django.shortcuts import redirect
from django.template import TemplateDoesNotExist, RequestContext, loader
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.translation import get_language
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class DjangoTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
template = loader.get_template(tpl)
return template.render(RequestContext(self.strategy.request, context))
def render_string(self, html, context):
template = loader.get_template_from_string(html)
return template.render(RequestContext(self.strategy.request, context))
class DjangoStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = DjangoTemplateStrategy
def __init__(self, storage, request=None, tpl=None):
self.request = request
self.session = request.session if request else {}
super(DjangoStrategy, self).__init__(storage, tpl)
def get_setting(self, name):
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL') and isinstance(value, Promise):
value = force_text(value)
return value
def request_data(self, merge=True):
if not self.request:
return {}
if merge:
data = self.request.GET.copy()
data.update(self.request.POST)
elif self.request.method == 'POST':
data = self.request.POST
else:
data = self.request.GET
return data
def request_host(self):
if self.request:
return self.request.get_host()
def request_is_secure(self):
"""Is the request using HTTPS?"""
return self.request.is_secure()
def request_path(self):
"""path of the current request"""
return self.request.path
def request_port(self):
"""Port in use for this request"""
return self.request.META['SERVER_PORT']
def request_get(self):
"""Request GET data"""
return self.request.GET.copy()
def request_post(self):
"""Request POST data"""
return self.request.POST.copy()
def redirect(self, url):
return redirect(url)
def html(self, content):
return HttpResponse(content, content_type='text/html;charset=UTF-8')
def render_html(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
try:
template = loader.get_template(tpl)
except TemplateDoesNotExist:
template = loader.get_template_from_string(html)
return template.render(RequestContext(self.request, context))
def authenticate(self, backend, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = backend
return authenticate(*args, **kwargs)
def session_get(self, name, default=None):
return self.session.get(name, default)
def session_set(self, name, value):
self.session[name] = value
if hasattr(self.session, 'modified'):
self.session.modified = True
def session_pop(self, name):
return self.session.pop(name, None)
def session_setdefault(self, name, value):
return self.session.setdefault(name, value)
def build_absolute_uri(self, path=None):
if self.request:
return self.request.build_absolute_uri(path)
else:
return path
def random_string(self, length=12, chars=BaseStrategy.ALLOWED_CHARS):
try:
from django.utils.crypto import get_random_string
except ImportError: # django < 1.4
return super(DjangoStrategy, self).random_string(length, chars)
else:
return get_random_string(length, chars)
def to_session_value(self, val):
"""Converts values that are instance of Model to a dictionary
with enough information to retrieve the instance back later."""
if isinstance(val, Model):
val = {
'pk': val.pk,
'ctype': ContentType.objects.get_for_model(val).pk
}
return val
def from_session_value(self, val):
"""Converts back the instance saved by self._ctype function."""
if isinstance(val, dict) and 'pk' in val and 'ctype' in val:
ctype = ContentType.objects.get_for_id(val['ctype'])
ModelClass = ctype.model_class()
val = ModelClass.objects.get(pk=val['pk'])
return val
def get_language(self):
"""Return current language"""
return get_language()
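# Round-trip sketch (assumes `user` is a saved Django model instance and that
# `storage`/`request` are wired up the way python-social-auth normally does):
#
#   strategy = DjangoStrategy(storage, request=request)
#   packed = strategy.to_session_value(user)        # {'pk': ..., 'ctype': ...}
#   restored = strategy.from_session_value(packed)  # same row, re-fetched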
| mit |
ropik/androguard | androguard/core/bytecodes/apk.py | 1 | 58292 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core import androconf
from androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS
from androguard.util import read
import StringIO
from struct import pack, unpack
from xml.sax.saxutils import escape
from zlib import crc32
import re
from xml.dom import minidom
NS_ANDROID_URI = 'http://schemas.android.com/apk/res/android'
# 0: chilkat
# 1: default python zipfile module
# 2: patch zipfile module
ZIPMODULE = 1
import sys
if sys.hexversion < 0x2070000:
try:
import chilkat
ZIPMODULE = 0
# UNLOCK : change it with your valid key !
try:
CHILKAT_KEY = read("key.txt")
except Exception:
CHILKAT_KEY = "testme"
except ImportError:
ZIPMODULE = 1
else:
ZIPMODULE = 1
################################################### CHILKAT ZIP FORMAT #####################################################
class ChilkatZip(object):
def __init__(self, raw):
self.files = []
self.zip = chilkat.CkZip()
self.zip.UnlockComponent( CHILKAT_KEY )
self.zip.OpenFromMemory( raw, len(raw) )
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None:
e.get_FileName(filename)
self.files.append( filename.getString() )
e = e.NextEntry()
def delete(self, patterns):
el = []
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None:
e.get_FileName(filename)
if re.match(patterns, filename.getString()) != None:
el.append( e )
e = e.NextEntry()
for i in el:
self.zip.DeleteEntry( i )
def remplace_file(self, filename, buff):
entry = self.zip.GetEntryByName(filename)
if entry != None:
obj = chilkat.CkByteData()
obj.append2( buff, len(buff) )
return entry.ReplaceData( obj )
return False
def write(self):
obj = chilkat.CkByteData()
self.zip.WriteToMemory( obj )
return obj.getBytes()
def namelist(self):
return self.files
def read(self, elem):
e = self.zip.GetEntryByName( elem )
s = chilkat.CkByteData()
e.Inflate( s )
return s.getBytes()
def sign_apk(filename, keystore, storepass):
from subprocess import Popen, PIPE, STDOUT
compile = Popen([androconf.CONF["PATH_JARSIGNER"],
"-sigalg",
"MD5withRSA",
"-digestalg",
"SHA1",
"-storepass",
storepass,
"-keystore",
keystore,
filename,
"alias_name"],
stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
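# Usage sketch (assumes androconf.CONF["PATH_JARSIGNER"] points at a working
# jarsigner binary; the keystore path, store password and the hard-coded
# "alias_name" key alias are placeholders that must match your own keystore):
#
#   sign_apk("app-unsigned.apk", "release.keystore", "s3cr3t")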
######################################################## APK FORMAT ########################################################
class APK(object):
"""
This class can access to all elements in an APK file
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param mode: specify the mode to open the file (optional)
:param magic_file: specify the magic file (optional)
:param zipmodule: specify the type of zip module to use (0:chilkat, 1:zipfile, 2:patch zipfile)
:type filename: string
:type raw: boolean
:type mode: string
:type magic_file: string
:type zipmodule: int
:Example:
APK("myfile.apk")
APK(read("myfile.apk"), raw=True)
"""
def __init__(self, filename, raw=False, mode="r", magic_file=None, zipmodule=ZIPMODULE):
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.valid_apk = False
self.files = {}
self.files_crc32 = {}
self.magic_file = magic_file
if raw == True:
self.__raw = filename
else:
self.__raw = read(filename)
self.zipmodule = zipmodule
if zipmodule == 0:
self.zip = ChilkatZip(self.__raw)
elif zipmodule == 2:
from androguard.patch import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
else:
import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
for i in self.zip.namelist():
if i == "AndroidManifest.xml":
self.axml[i] = AXMLPrinter(self.zip.read(i))
try:
self.xml[i] = minidom.parseString(self.axml[i].get_buff())
except:
self.xml[i] = None
if self.xml[i] != None:
self.package = self.xml[i].documentElement.getAttribute("package")
self.androidversion["Code"] = self.xml[i].documentElement.getAttributeNS(NS_ANDROID_URI, "versionCode")
self.androidversion["Name"] = self.xml[i].documentElement.getAttributeNS(NS_ANDROID_URI, "versionName")
for item in self.xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttributeNS(NS_ANDROID_URI, "name")))
self.valid_apk = True
self.get_files_types()
def get_AndroidManifest(self):
"""
Return the Android Manifest XML file
:rtype: xml object
"""
return self.xml["AndroidManifest.xml"]
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: string
"""
return self.filename
def get_package(self):
"""
Return the name of the package
:rtype: string
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
:rtype: string
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
:rtype: string
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the files inside the APK
:rtype: a list of strings
"""
return self.zip.namelist()
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
:rtype: a dictionary
"""
try:
import magic
except ImportError:
# no lib magic !
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
self.files[i] = "Unknown"
return self.files
if self.files != {}:
return self.files
builtin_magic = 0
try:
getattr(magic, "MagicException")
except AttributeError:
builtin_magic = 1
if builtin_magic:
ms = magic.open(magic.MAGIC_NONE)
ms.load()
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = ms.buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
else:
m = magic.Magic(magic_file=self.magic_file)
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = m.from_buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
return self.files
def _patch_magic(self, buffer, orig):
if ("Zip" in orig) or ("DBase" in orig):
val = androconf.is_android_raw(buffer)
if val == "APK":
if androconf.is_valid_android_raw(buffer):
return "Android application package file"
elif val == "AXML":
return "Android's binary XML"
return orig
def get_files_crc32(self):
if self.files_crc32 == {}:
self.get_files_types()
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: string, string, int
"""
if self.files == {}:
self.get_files_types()
for i in self.get_files():
try:
yield i, self.files[i], self.files_crc32[i]
except KeyError:
yield i, "", ""
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: string
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
:rtype: string
"""
try:
return self.zip.read(filename)
except KeyError:
return ""
def get_dex(self):
"""
Return the raw data of the classes dex file
:rtype: string
"""
return self.get_file("classes.dex")
def get_elements(self, tag_name, attribute):
"""
Return elements in xml files which match with the tag name and the specific attribute
:param tag_name: a string which specify the tag name
:param attribute: a string which specify the attribute
"""
l = []
for i in self.xml:
for item in self.xml[i].getElementsByTagName(tag_name):
value = item.getAttributeNS(NS_ANDROID_URI, attribute)
value = self.format_value( value )
l.append( str( value ) )
return l
def format_value(self, value):
if len(value) > 0:
if value[0] == ".":
value = self.package + value
else:
v_dot = value.find(".")
if v_dot == 0:
value = self.package + "." + value
elif v_dot == -1:
value = self.package + "." + value
return value
def get_element(self, tag_name, attribute):
"""
Return element in xml files which match with the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml:
for item in self.xml[i].getElementsByTagName(tag_name):
value = item.getAttributeNS(NS_ANDROID_URI, attribute)
if len(value) > 0:
return value
return None
def get_main_activity(self):
"""
Return the name of the main activity
:rtype: string
"""
x = set()
y = set()
for i in self.xml:
for item in self.xml[i].getElementsByTagName("activity"):
for sitem in item.getElementsByTagName( "action" ):
val = sitem.getAttributeNS(NS_ANDROID_URI, "name" )
if val == "android.intent.action.MAIN":
x.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
for sitem in item.getElementsByTagName( "category" ):
val = sitem.getAttributeNS(NS_ANDROID_URI, "name" )
if val == "android.intent.category.LAUNCHER":
y.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
z = x.intersection(y)
if len(z) > 0:
return self.format_value(z.pop())
return None
def get_activities(self):
"""
Return the android:name attribute of all activities
:rtype: a list of string
"""
return self.get_elements("activity", "name")
def get_services(self):
"""
Return the android:name attribute of all services
:rtype: a list of string
"""
return self.get_elements("service", "name")
def get_receivers(self):
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return self.get_elements("receiver", "name")
def get_providers(self):
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return self.get_elements("provider", "name")
def get_intent_filters(self, category, name):
d = {}
d["action"] = []
d["category"] = []
for i in self.xml:
for item in self.xml[i].getElementsByTagName(category):
if self.format_value(item.getAttributeNS(NS_ANDROID_URI, "name")) == name:
for sitem in item.getElementsByTagName("intent-filter"):
for ssitem in sitem.getElementsByTagName("action"):
if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["action"]:
d["action"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
for ssitem in sitem.getElementsByTagName("category"):
if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["category"]:
d["category"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d
def get_permissions(self):
"""
Return permissions
:rtype: list of string
"""
return self.permissions
def get_details_permissions(self):
"""
Return permissions with details
:rtype: list of string
"""
l = {}
for i in self.permissions:
perm = i
pos = i.rfind(".")
if pos != -1:
perm = i[pos+1:]
try:
l[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]
except KeyError:
l[ i ] = [ "normal", "Unknown permission from android reference", "Unknown permission from android reference" ]
return l
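# Each value in the returned dict is a [protection_level, short_description,
# long_description] triple, matching the fallback entry used above for
# permissions that are not listed in DVM_PERMISSIONS.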
def get_max_sdk_version(self):
"""
Return the android:maxSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "maxSdkVersion")
def get_min_sdk_version(self):
"""
Return the android:minSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "minSdkVersion")
def get_target_sdk_version(self):
"""
Return the android:targetSdkVersion attribute
:rtype: string
"""
return self.get_element( "uses-sdk", "targetSdkVersion" )
def get_libraries(self):
"""
Return the android:name attributes for libraries
:rtype: list
"""
return self.get_elements( "uses-library", "name" )
def get_certificate(self, filename):
"""
Return a certificate object by giving the name in the apk file
"""
import chilkat
cert = chilkat.CkCert()
f = self.get_file(filename)
data = chilkat.CkByteData()
data.append2(f, len(f))
success = cert.LoadFromBinary(data)
return success, cert
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionary (key:filename, value:content of the file)
"""
if self.zipmodule == 2:
from androguard.patch import zipfile
zout = zipfile.ZipFile(filename, 'w')
else:
import zipfile
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
if deleted_files != None:
if re.match(deleted_files, item.filename) == None:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the xml object which corresponds to the AndroidManifest.xml file
:rtype: object
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_resources(self):
"""
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
"""
try:
return self.arsc["resources.arsc"]
except KeyError:
try:
self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
return self.arsc["resources.arsc"]
except KeyError:
return None
def get_signature_name(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA|\.DSA)$")
for i in self.get_files():
if signature_expr.search(i):
return i
return None
def get_signature(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA|\.DSA)$")
for i in self.get_files():
if signature_expr.search(i):
return self.get_file(i)
return None
def show(self):
self.get_files_types()
print "FILES: "
for i in self.get_files():
try:
print "\t", i, self.files[i], "%x" % self.files_crc32[i]
except KeyError:
print "\t", i, "%x" % self.files_crc32[i]
print "PERMISSIONS: "
details_permissions = self.get_details_permissions()
for i in details_permissions:
print "\t", i, details_permissions[i]
print "MAIN ACTIVITY: ", self.get_main_activity()
print "ACTIVITIES: "
activities = self.get_activities()
for i in activities:
filters = self.get_intent_filters("activity", i)
print "\t", i, filters or ""
print "SERVICES: "
services = self.get_services()
for i in services:
filters = self.get_intent_filters("service", i)
print "\t", i, filters or ""
print "RECEIVERS: "
receivers = self.get_receivers()
for i in receivers:
filters = self.get_intent_filters("receiver", i)
print "\t", i, filters or ""
print "PROVIDERS: ", self.get_providers()
def show_Certificate(cert):
print "Issuer: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.issuerC(), cert.issuerCN(), cert.issuerDN(), cert.issuerE(), cert.issuerL(), cert.issuerO(), cert.issuerOU(), cert.issuerS())
print "Subject: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.subjectC(), cert.subjectCN(), cert.subjectDN(), cert.subjectE(), cert.subjectL(), cert.subjectO(), cert.subjectOU(), cert.subjectS())
######################################################## AXML FORMAT ########################################################
# Translated from http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
UTF8_FLAG = 0x00000100
class StringBlock(object):
def __init__(self, buff):
self.start = buff.get_idx()
self._cache = {}
self.header = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.chunkSize = unpack('<i', buff.read(4))[0]
self.stringCount = unpack('<i', buff.read(4))[0]
self.styleOffsetCount = unpack('<i', buff.read(4))[0]
self.flags = unpack('<i', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)
self.stringsOffset = unpack('<i', buff.read(4))[0]
self.stylesOffset = unpack('<i', buff.read(4))[0]
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_strings = []
self.m_styles = []
for i in range(0, self.stringCount):
self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])
for i in range(0, self.styleOffsetCount):
self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])
size = self.chunkSize - self.stringsOffset
if self.stylesOffset != 0:
size = self.stylesOffset - self.stringsOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size):
self.m_strings.append(unpack('=b', buff.read(1))[0])
if self.stylesOffset != 0:
size = self.chunkSize - self.stylesOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size / 4):
self.m_styles.append(unpack('<i', buff.read(4))[0])
def getString(self, idx):
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringOffsets or idx >= len(self.m_stringOffsets):
return ""
offset = self.m_stringOffsets[idx]
if not self.m_isUTF8:
length = self.getShort2(self.m_strings, offset)
offset += 2
self._cache[idx] = self.decode(self.m_strings, offset, length)
else:
offset += self.getVarint(self.m_strings, offset)[1]
varint = self.getVarint(self.m_strings, offset)
offset += varint[1]
length = varint[0]
self._cache[idx] = self.decode2(self.m_strings, offset, length)
return self._cache[idx]
def getStyle(self, idx):
print idx
print idx in self.m_styleOffsets, self.m_styleOffsets[idx]
print self.m_styles[0]
def decode(self, array, offset, length):
length = length * 2
length = length + length % 2
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
if data[-2:] == "\x00\x00":
break
end_zero = data.find("\x00\x00")
if end_zero != -1:
data = data[:end_zero]
return data.decode("utf-16", 'replace')
def decode2(self, array, offset, length):
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
return data.decode("utf-8", 'replace')
def getVarint(self, array, offset):
val = array[offset]
more = (val & 0x80) != 0
val &= 0x7f
if not more:
return val, 1
return val << 8 | array[offset + 1] & 0xff, 2
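# Worked example: getVarint([0x83, 0x21], 0) returns (0x321, 2) because the
# high bit of the first byte (0x83) signals that a second length byte follows;
# a first byte below 0x80 is returned as-is with size 1.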
def getShort(self, array, offset):
value = array[offset / 4]
if ((offset % 4) / 2) == 0:
return value & 0xFFFF
else:
return value >> 16
def getShort2(self, array, offset):
return (array[offset + 1] & 0xff) << 8 | array[offset] & 0xff
def show(self):
print "StringBlock", hex(self.start), hex(self.header), hex(self.header_size), hex(self.chunkSize), hex(self.stringsOffset), self.m_stringOffsets
for i in range(0, len(self.m_stringOffsets)):
print i, repr(self.getString(i))
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
ATTRIBUTE_LENGHT = 5
CHUNK_AXML_FILE = 0x00080003
CHUNK_RESOURCEIDS = 0x00080180
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
class AXMLParser(object):
def __init__(self, raw_buff):
self.reset()
self.valid_axml = True
self.buff = bytecode.BuffHandle(raw_buff)
axml_file = unpack('<L', self.buff.read(4))[0]
if axml_file == CHUNK_AXML_FILE:
self.buff.read(4)
self.sb = StringBlock(self.buff)
self.m_resourceIDs = []
self.m_prefixuri = {}
self.m_uriprefix = {}
self.m_prefixuriL = []
self.visited_ns = []
else:
self.valid_axml = False
androconf.warning("Not a valid xml file")
def is_valid(self):
return self.valid_axml
def reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def next(self):
self.doNext()
return self.m_event
def doNext(self):
if self.m_event == END_DOCUMENT:
return
event = self.m_event
self.reset()
while True:
chunkType = -1
# Fake END_DOCUMENT event.
if event == END_TAG:
pass
# START_DOCUMENT
if event == START_DOCUMENT:
chunkType = CHUNK_XML_START_TAG
else:
if self.buff.end():
self.m_event = END_DOCUMENT
break
chunkType = unpack('<L', self.buff.read(4))[0]
if chunkType == CHUNK_RESOURCEIDS:
chunkSize = unpack('<L', self.buff.read(4))[0]
# FIXME
if chunkSize < 8 or chunkSize % 4 != 0:
androconf.warning("Invalid chunk size")
for i in range(0, chunkSize / 4 - 2):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# FIXME
if chunkType < CHUNK_XML_FIRST or chunkType > CHUNK_XML_LAST:
androconf.warning("invalid chunk type")
# Fake START_DOCUMENT event.
if chunkType == CHUNK_XML_START_TAG and event == -1:
self.m_event = START_DOCUMENT
break
self.buff.read(4) # /*chunkSize*/
lineNumber = unpack('<L', self.buff.read(4))[0]
self.buff.read(4) # 0xFFFFFFFF
if chunkType == CHUNK_XML_START_NAMESPACE or chunkType == CHUNK_XML_END_NAMESPACE:
if chunkType == CHUNK_XML_START_NAMESPACE:
prefix = unpack('<L', self.buff.read(4))[0]
uri = unpack('<L', self.buff.read(4))[0]
self.m_prefixuri[prefix] = uri
self.m_uriprefix[uri] = prefix
self.m_prefixuriL.append((prefix, uri))
self.ns = uri
else:
self.ns = -1
self.buff.read(4)
self.buff.read(4)
(prefix, uri) = self.m_prefixuriL.pop()
#del self.m_prefixuri[ prefix ]
#del self.m_uriprefix[ uri ]
continue
self.m_lineNumber = lineNumber
if chunkType == CHUNK_XML_START_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4) # flags
attributeCount = unpack('<L', self.buff.read(4))[0]
self.m_idAttribute = (attributeCount >> 16) - 1
attributeCount = attributeCount & 0xFFFF
self.m_classAttribute = unpack('<L', self.buff.read(4))[0]
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
for i in range(0, attributeCount * ATTRIBUTE_LENGHT):
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
for i in range(ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = START_TAG
break
if chunkType == CHUNK_XML_END_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
self.m_event = END_TAG
break
if chunkType == CHUNK_XML_TEXT:
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4)
self.buff.read(4)
self.m_event = TEXT
break
def getPrefixByUri(self, uri):
try:
return self.m_uriprefix[uri]
except KeyError:
return -1
def getPrefix(self):
try:
return self.sb.getString(self.m_uriprefix[self.m_namespaceUri])
except KeyError:
return u''
def getName(self):
if self.m_name == -1 or (self.m_event != START_TAG and self.m_event != END_TAG):
return u''
return self.sb.getString(self.m_name)
def getText(self):
if self.m_name == -1 or self.m_event != TEXT:
return u''
return self.sb.getString(self.m_name)
def getNamespacePrefix(self, pos):
prefix = self.m_prefixuriL[pos][0]
return self.sb.getString(prefix)
def getNamespaceUri(self, pos):
uri = self.m_prefixuriL[pos][1]
return self.sb.getString(uri)
def getXMLNS(self):
buff = ""
for i in self.m_uriprefix:
if i not in self.visited_ns:
buff += "xmlns:%s=\"%s\"\n" % (self.sb.getString(self.m_uriprefix[i]), self.sb.getString(self.m_prefixuri[self.m_uriprefix[i]]))
self.visited_ns.append(i)
return buff
def getNamespaceCount(self, pos):
pass
def getAttributeOffset(self, index):
# FIXME
if self.m_event != START_TAG:
androconf.warning("Current event is not START_TAG.")
offset = index * 5
# FIXME
if offset >= len(self.m_attributes):
androconf.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
if self.m_event != START_TAG:
return -1
return len(self.m_attributes) / ATTRIBUTE_LENGHT
def getAttributePrefix(self, index):
offset = self.getAttributeOffset(index)
uri = self.m_attributes[offset + ATTRIBUTE_IX_NAMESPACE_URI]
prefix = self.getPrefixByUri(uri)
if prefix == -1:
return ""
return self.sb.getString(prefix)
def getAttributeName(self, index):
offset = self.getAttributeOffset(index)
name = self.m_attributes[offset+ATTRIBUTE_IX_NAME]
if name == -1:
return ""
return self.sb.getString( name )
def getAttributeValueType(self, index):
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index):
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index):
offset = self.getAttributeOffset(index)
valueType = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
if valueType == TYPE_STRING:
valueString = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_STRING]
return self.sb.getString( valueString )
# WIP
return ""
#int valueData=m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA];
#return TypedValue.coerceToString(valueType,valueData);
TYPE_ATTRIBUTE = 2
TYPE_DIMENSION = 5
TYPE_FIRST_COLOR_INT = 28
TYPE_FIRST_INT = 16
TYPE_FLOAT = 4
TYPE_FRACTION = 6
TYPE_INT_BOOLEAN = 18
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB4 = 31
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_DEC = 16
TYPE_INT_HEX = 17
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_STRING = 3
RADIX_MULTS = [ 0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010 ]
DIMENSION_UNITS = [ "px","dip","sp","pt","in","mm" ]
FRACTION_UNITS = [ "%", "%p" ]
COMPLEX_UNIT_MASK = 15
def complexToFloat(xcomplex):
return (float)(xcomplex & 0xFFFFFF00) * RADIX_MULTS[(xcomplex >> 4) & 3]
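# Worked example: a TYPE_DIMENSION raw value of 0x00001001 decodes as
# complexToFloat(0x1001) == 16.0 with unit
# DIMENSION_UNITS[0x1001 & COMPLEX_UNIT_MASK] == "dip", so the attribute is
# rendered as "16.000000dip" by AXMLPrinter below.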
class AXMLPrinter(object):
def __init__(self, raw_buff):
self.axml = AXMLParser(raw_buff)
self.xmlns = False
self.buff = u''
while True and self.axml.is_valid():
_type = self.axml.next()
# print "tagtype = ", _type
if _type == START_DOCUMENT:
self.buff += u'<?xml version="1.0" encoding="utf-8"?>\n'
elif _type == START_TAG:
self.buff += u'<' + self.getPrefix(self.axml.getPrefix()) + self.axml.getName() + u'\n'
self.buff += self.axml.getXMLNS()
for i in range(0, self.axml.getAttributeCount()):
self.buff += "%s%s=\"%s\"\n" % (self.getPrefix(
self.axml.getAttributePrefix(i)), self.axml.getAttributeName(i), self._escape(self.getAttributeValue(i)))
self.buff += u'>\n'
elif _type == END_TAG:
self.buff += "</%s%s>\n" % (self.getPrefix(self.axml.getPrefix()), self.axml.getName())
elif _type == TEXT:
self.buff += "%s\n" % self.axml.getText()
elif _type == END_DOCUMENT:
break
# pleed patch
def _escape(self, s):
s = s.replace("&", "&")
s = s.replace('"', """)
s = s.replace("'", "'")
s = s.replace("<", "<")
s = s.replace(">", ">")
return escape(s)
def get_buff(self):
return self.buff.encode('utf-8')
def get_xml(self):
return minidom.parseString(self.get_buff()).toprettyxml(encoding="utf-8")
def get_xml_obj(self):
return minidom.parseString(self.get_buff())
def getPrefix(self, prefix):
if prefix == None or len(prefix) == 0:
return u''
return prefix + u':'
def getAttributeValue(self, index):
_type = self.axml.getAttributeValueType(index)
_data = self.axml.getAttributeValueData(index)
if _type == TYPE_STRING:
return self.axml.getAttributeValue(index)
elif _type == TYPE_ATTRIBUTE:
return "?%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_REFERENCE:
return "@%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_FLOAT:
return "%f" % unpack("=f", pack("=L", _data))[0]
elif _type == TYPE_INT_HEX:
return "0x%08X" % _data
elif _type == TYPE_INT_BOOLEAN:
if _data == 0:
return "false"
return "true"
elif _type == TYPE_DIMENSION:
return "%f%s" % (complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type == TYPE_FRACTION:
return "%f%s" % (complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type >= TYPE_FIRST_COLOR_INT and _type <= TYPE_LAST_COLOR_INT:
return "#%08X" % _data
elif _type >= TYPE_FIRST_INT and _type <= TYPE_LAST_INT:
return "%d" % androconf.long2int(_data)
return "<0x%X, type 0x%02X>" % (_data, _type)
def getPackage(self, id):
if id >> 24 == 1:
return "android:"
return ""
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# Chunk types in RES_XML_TYPE
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE= 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
# This contains a uint32_t array mapping strings in the string
# pool back to resource identifiers. It is optional.
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Chunk types in RES_TABLE_TYPE
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
class ARSCParser(object):
def __init__(self, raw_buff):
self.analyzed = False
self.buff = bytecode.BuffHandle(raw_buff)
#print "SIZE", hex(self.buff.size())
self.header = ARSCHeader(self.buff)
self.packageCount = unpack('<i', self.buff.read(4))[0]
#print hex(self.packageCount)
self.stringpool_main = StringBlock(self.buff)
self.next_header = ARSCHeader(self.buff)
self.packages = {}
self.values = {}
for i in range(0, self.packageCount):
current_package = ARSCResTablePackage(self.buff)
package_name = current_package.get_name()
self.packages[package_name] = []
mTableStrings = StringBlock(self.buff)
mKeyStrings = StringBlock(self.buff)
#self.stringpool_main.show()
#self.mTableStrings.show()
#self.mKeyStrings.show()
self.packages[package_name].append(current_package)
self.packages[package_name].append(mTableStrings)
self.packages[package_name].append(mKeyStrings)
pc = PackageContext(current_package, self.stringpool_main, mTableStrings, mKeyStrings)
current = self.buff.get_idx()
while not self.buff.end():
header = ARSCHeader(self.buff)
self.packages[package_name].append(header)
if header.type == RES_TABLE_TYPE_SPEC_TYPE:
self.packages[package_name].append(ARSCResTypeSpec(self.buff, pc))
elif header.type == RES_TABLE_TYPE_TYPE:
a_res_type = ARSCResType(self.buff, pc)
self.packages[package_name].append(a_res_type)
entries = []
for i in range(0, a_res_type.entryCount):
current_package.mResId = current_package.mResId & 0xffff0000 | i
entries.append((unpack('<i', self.buff.read(4))[0], current_package.mResId))
self.packages[package_name].append(entries)
for entry, res_id in entries:
if self.buff.end():
break
if entry != -1:
ate = ARSCResTableEntry(self.buff, res_id, pc)
self.packages[package_name].append(ate)
elif header.type == RES_TABLE_PACKAGE_TYPE:
break
else:
androconf.warning("unknown type")
break
current += header.size
self.buff.set_idx(current)
def _analyse(self):
if self.analyzed:
return
self.analyzed = True
for package_name in self.packages:
self.values[package_name] = {}
nb = 3
for header in self.packages[package_name][nb:]:
if isinstance(header, ARSCHeader):
if header.type == RES_TABLE_TYPE_TYPE:
a_res_type = self.packages[package_name][nb + 1]
if a_res_type.config.get_language() not in self.values[package_name]:
self.values[package_name][a_res_type.config.get_language()] = {}
self.values[package_name][a_res_type.config.get_language()]["public"] = []
c_value = self.values[package_name][a_res_type.config.get_language()]
entries = self.packages[package_name][nb + 2]
nb_i = 0
for entry, res_id in entries:
if entry != -1:
ate = self.packages[package_name][nb + 3 + nb_i]
#print ate.is_public(), a_res_type.get_type(), ate.get_value(), hex(ate.mResId)
if ate.get_index() != -1:
c_value["public"].append((a_res_type.get_type(), ate.get_value(), ate.mResId))
if a_res_type.get_type() not in c_value:
c_value[a_res_type.get_type()] = []
if a_res_type.get_type() == "string":
c_value["string"].append(self.get_resource_string(ate))
elif a_res_type.get_type() == "id":
if not ate.is_complex():
c_value["id"].append(self.get_resource_id(ate))
elif a_res_type.get_type() == "bool":
if not ate.is_complex():
c_value["bool"].append(self.get_resource_bool(ate))
elif a_res_type.get_type() == "integer":
c_value["integer"].append(self.get_resource_integer(ate))
elif a_res_type.get_type() == "color":
c_value["color"].append(self.get_resource_color(ate))
elif a_res_type.get_type() == "dimen":
c_value["dimen"].append(self.get_resource_dimen(ate))
#elif a_res_type.get_type() == "style":
# c_value["style"].append(self.get_resource_style(ate))
nb_i += 1
nb += 1
def get_resource_string(self, ate):
return [ate.get_value(), ate.get_key_data()]
def get_resource_id(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == 1:
x.append("true")
return x
def get_resource_bool(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == -1:
x.append("true")
return x
def get_resource_integer(self, ate):
return [ate.get_value(), ate.key.get_data()]
def get_resource_color(self, ate):
entry_data = ate.key.get_data()
return [ate.get_value(), "#%02x%02x%02x%02x" % (((entry_data >> 24) & 0xFF), ((entry_data >> 16) & 0xFF), ((entry_data >> 8) & 0xFF), (entry_data & 0xFF))]
def get_resource_dimen(self, ate):
try:
return [ate.get_value(), "%s%s" % (complexToFloat(ate.key.get_data()), DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])]
except Exception, why:
androconf.warning(why.__str__())
return [ate.get_value(), ate.key.get_data()]
# FIXME
def get_resource_style(self, ate):
return ["", ""]
def get_packages_names(self):
return self.packages.keys()
def get_locales(self, package_name):
self._analyse()
return self.values[package_name].keys()
def get_types(self, package_name, locale):
self._analyse()
return self.values[package_name][locale].keys()
def get_public_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["public"]:
buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (i[0], i[1], i[2])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_strings_resources(self):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += "<packages>\n"
for package_name in self.get_packages_names():
buff += "<package name=\"%s\">\n" % package_name
for locale in self.get_locales(package_name):
buff += "<locale value=%s>\n" % repr(locale)
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
buff += '</locale>\n'
buff += "</package>\n"
buff += "</packages>\n"
return buff.encode('utf-8')
def get_id_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["id"]:
if len(i) == 1:
buff += '<item type="id" name="%s"/>\n' % (i[0])
else:
buff += '<item type="id" name="%s">%s</item>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_bool_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["bool"]:
buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_integer_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["integer"]:
buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_color_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["color"]:
buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_dimen_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["dimen"]:
buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_id(self, package_name, rid, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["public"]:
if i[2] == rid:
return i
except KeyError:
return None
def get_string(self, package_name, name, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["string"]:
if i[0] == name:
return i
except KeyError:
return None
def get_items(self, package_name):
self._analyse()
return self.packages[package_name]
class PackageContext(object):
def __init__(self, current_package, stringpool_main, mTableStrings, mKeyStrings):
self.stringpool_main = stringpool_main
self.mTableStrings = mTableStrings
self.mKeyStrings = mKeyStrings
self.current_package = current_package
def get_mResId(self):
return self.current_package.mResId
def set_mResId(self, mResId):
self.current_package.mResId = mResId
class ARSCHeader(object):
def __init__(self, buff):
self.start = buff.get_idx()
self.type = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.size = unpack('<i', buff.read(4))[0]
#print "ARSCHeader", hex(self.start), hex(self.type), hex(self.header_size), hex(self.size)
class ARSCResTablePackage(object):
def __init__(self, buff):
self.start = buff.get_idx()
self.id = unpack('<i', buff.read(4))[0]
self.name = buff.readNullString(256)
self.typeStrings = unpack('<i', buff.read(4))[0]
self.lastPublicType = unpack('<i', buff.read(4))[0]
self.keyStrings = unpack('<i', buff.read(4))[0]
self.lastPublicKey = unpack('<i', buff.read(4))[0]
self.mResId = self.id << 24
#print "ARSCResTablePackage", hex(self.start), hex(self.id), hex(self.mResId), repr(self.name.decode("utf-16", errors='replace')), hex(self.typeStrings), hex(self.lastPublicType), hex(self.keyStrings), hex(self.lastPublicKey)
def get_name(self):
name = self.name.decode("utf-16", 'replace')
name = name[:name.find("\x00")]
return name
class ARSCResTypeSpec(object):
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
#print "ARSCResTypeSpec", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.typespec_entries = []
for i in range(0, self.entryCount):
self.typespec_entries.append(unpack('<i', buff.read(4))[0])
class ARSCResType(object):
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
self.entriesStart = unpack('<i', buff.read(4))[0]
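        # Android resource ids are laid out as 0xPPTTEEEE: the package id fills
        # the top byte (set in ARSCResTablePackage) and the type id the next
        # byte; the low 16 bits identify the entry within the type.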
self.mResId = (0xff000000 & self.parent.get_mResId()) | self.id << 16
self.parent.set_mResId(self.mResId)
#print "ARSCResType", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), hex(self.entriesStart), hex(self.mResId), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.config = ARSCResTableConfig(buff)
def get_type(self):
return self.parent.mTableStrings.getString(self.id - 1)
class ARSCResTableConfig(object):
def __init__(self, buff):
self.start = buff.get_idx()
self.size = unpack('<i', buff.read(4))[0]
self.imsi = unpack('<i', buff.read(4))[0]
self.locale = unpack('<i', buff.read(4))[0]
self.screenType = unpack('<i', buff.read(4))[0]
self.input = unpack('<i', buff.read(4))[0]
self.screenSize = unpack('<i', buff.read(4))[0]
self.version = unpack('<i', buff.read(4))[0]
self.screenConfig = 0
self.screenSizeDp = 0
if self.size >= 32:
self.screenConfig = unpack('<i', buff.read(4))[0]
if self.size >= 36:
self.screenSizeDp = unpack('<i', buff.read(4))[0]
self.exceedingSize = self.size - 36
if self.exceedingSize > 0:
androconf.warning("too much bytes !")
self.padding = buff.read(self.exceedingSize)
#print "ARSCResTableConfig", hex(self.start), hex(self.size), hex(self.imsi), hex(self.locale), repr(self.get_language()), repr(self.get_country()), hex(self.screenType), hex(self.input), hex(self.screenSize), hex(self.version), hex(self.screenConfig), hex(self.screenSizeDp)
def get_language(self):
x = self.locale & 0x0000ffff
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_country(self):
x = (self.locale & 0xffff0000) >> 16
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
class ARSCResTableEntry(object):
def __init__(self, buff, mResId, parent=None):
self.start = buff.get_idx()
self.mResId = mResId
self.parent = parent
self.size = unpack('<h', buff.read(2))[0]
self.flags = unpack('<h', buff.read(2))[0]
self.index = unpack('<i', buff.read(4))[0]
#print "ARSCResTableEntry", hex(self.start), hex(self.mResId), hex(self.size), hex(self.flags), hex(self.index), self.is_complex()#, hex(self.mResId)
if self.flags & 1:
self.item = ARSCComplex(buff, parent)
else:
self.key = ARSCResStringPoolRef(buff, self.parent)
def get_index(self):
return self.index
def get_value(self):
return self.parent.mKeyStrings.getString(self.index)
def get_key_data(self):
return self.key.get_data_value()
def is_public(self):
return self.flags == 0 or self.flags == 2
def is_complex(self):
return (self.flags & 1) == 1
class ARSCComplex(object):
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id_parent = unpack('<i', buff.read(4))[0]
self.count = unpack('<i', buff.read(4))[0]
self.items = []
for i in range(0, self.count):
self.items.append((unpack('<i', buff.read(4))[0], ARSCResStringPoolRef(buff, self.parent)))
#print "ARSCComplex", hex(self.start), self.id_parent, self.count, repr(self.parent.mKeyStrings.getString(self.id_parent))
class ARSCResStringPoolRef(object):
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.skip_bytes = buff.read(3)
self.data_type = unpack('<b', buff.read(1))[0]
self.data = unpack('<i', buff.read(4))[0]
#print "ARSCResStringPoolRef", hex(self.start), hex(self.data_type), hex(self.data)#, "key:" + self.parent.mKeyStrings.getString(self.index), self.parent.stringpool_main.getString(self.data)
def get_data_value(self):
return self.parent.stringpool_main.getString(self.data)
def get_data(self):
return self.data
def get_data_type(self):
return self.data_type
def get_arsc_info(arscobj):
buff = ""
for package in arscobj.get_packages_names():
buff += package + ":\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + ":\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + ":\n"
try:
tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(package, locale).decode("utf-8", 'replace').split("\n")
for i in tmp_buff:
buff += "\t\t\t" + i + "\n"
except AttributeError:
pass
return buff
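# Example usage (sketch, names are illustrative): given a parsed resource table
# object exposing the accessors used above (get_packages_names, get_locales,
# get_types, get_*_resources), this dumps every section of an APK's resources:
#   arscobj = ARSCParser(open("resources.arsc", "rb").read())
#   print get_arsc_info(arscobj)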
| apache-2.0 |
Limags/MissionPlanner | Lib/site-packages/numpy/distutils/tests/test_misc_util.py | 51 | 2430 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
def test_1(self):
assert_equal(appendpath('prefix','name'),join('prefix','name'))
assert_equal(appendpath('/prefix','name'),ajoin('prefix','name'))
assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name'))
assert_equal(appendpath('prefix','/name'),join('prefix','name'))
def test_2(self):
assert_equal(appendpath('prefix/sub','name'),
join('prefix','sub','name'))
assert_equal(appendpath('prefix/sub','sup/name'),
join('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub','/prefix/name'),
ajoin('prefix','sub','name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub','/prefix/sup/name'),
ajoin('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'),
ajoin('prefix','sub','sub2','sup','sup2','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'),
ajoin('prefix','sub','sub2','sup','name'))
class TestMinrelpath(TestCase):
def test_1(self):
n = lambda path: path.replace('/',sep)
assert_equal(minrelpath(n('aa/bb')),n('aa/bb'))
assert_equal(minrelpath('..'),'..')
assert_equal(minrelpath(n('aa/..')),'')
assert_equal(minrelpath(n('aa/../bb')),'bb')
assert_equal(minrelpath(n('aa/bb/..')),'aa')
assert_equal(minrelpath(n('aa/bb/../..')),'')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd'))
assert_equal(minrelpath(n('.././..')),n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd'))
class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__),'..'))
ls = gpaths('command/*.py', local_path)
assert join(local_path,'command','build_src.py') in ls,`ls`
f = gpaths('system_info.py', local_path)
assert join(local_path,'system_info.py')==f[0],`f`
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
MathieuDuponchelle/gobject-introspection | giscanner/girwriter.py | 1 | 26509 | # -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
# Copyright (C) 2008, 2009 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import with_statement
from . import ast
from .xmlwriter import XMLWriter
# Bump this for *incompatible* changes to the .gir.
# Compatible changes we just make inline
COMPATIBLE_GIR_VERSION = '1.2'
class GIRWriter(XMLWriter):
def __init__(self, namespace):
super(GIRWriter, self).__init__()
self.write_comment(
'This file was automatically generated from C sources - DO NOT EDIT!\n'
'To affect the contents of this file, edit the original C definitions,\n'
'and/or use gtk-doc annotations. ')
self._write_repository(namespace)
def _write_repository(self, namespace):
attrs = [
('version', COMPATIBLE_GIR_VERSION),
('xmlns', 'http://www.gtk.org/introspection/core/1.0'),
('xmlns:c', 'http://www.gtk.org/introspection/c/1.0'),
('xmlns:glib', 'http://www.gtk.org/introspection/glib/1.0')]
with self.tagcontext('repository', attrs):
for include in sorted(namespace.includes):
self._write_include(include)
for pkg in sorted(set(namespace.exported_packages)):
self._write_pkgconfig_pkg(pkg)
for c_include in sorted(set(namespace.c_includes)):
self._write_c_include(c_include)
self._namespace = namespace
self._write_namespace(namespace)
self._namespace = None
def _write_include(self, include):
attrs = [('name', include.name), ('version', include.version)]
self.write_tag('include', attrs)
def _write_pkgconfig_pkg(self, package):
attrs = [('name', package)]
self.write_tag('package', attrs)
def _write_c_include(self, c_include):
attrs = [('name', c_include)]
self.write_tag('c:include', attrs)
def _write_namespace(self, namespace):
attrs = [('name', namespace.name),
('version', namespace.version),
('shared-library', ','.join(namespace.shared_libraries)),
('c:identifier-prefixes', ','.join(namespace.identifier_prefixes)),
('c:symbol-prefixes', ','.join(namespace.symbol_prefixes))]
with self.tagcontext('namespace', attrs):
# We define a custom sorting function here because
# we want aliases to be first. They're a bit
# special because the typelib compiler expands them.
def nscmp(a, b):
if isinstance(a, ast.Alias):
if isinstance(b, ast.Alias):
return cmp(a.name, b.name)
else:
return -1
elif isinstance(b, ast.Alias):
return 1
else:
return cmp(a, b)
for node in sorted(namespace.itervalues(), cmp=nscmp):
self._write_node(node)
def _write_node(self, node):
if isinstance(node, ast.Function):
self._write_function(node)
elif isinstance(node, ast.Enum):
self._write_enum(node)
elif isinstance(node, ast.Bitfield):
self._write_bitfield(node)
elif isinstance(node, (ast.Class, ast.Interface)):
self._write_class(node)
elif isinstance(node, ast.Callback):
self._write_callback(node)
elif isinstance(node, ast.Record):
self._write_record(node)
elif isinstance(node, ast.Union):
self._write_union(node)
elif isinstance(node, ast.Boxed):
self._write_boxed(node)
elif isinstance(node, ast.Member):
# FIXME: atk_misc_instance singleton
pass
elif isinstance(node, ast.Alias):
self._write_alias(node)
elif isinstance(node, ast.Constant):
self._write_constant(node)
elif isinstance(node, ast.DocSection):
self._write_doc_section(node)
else:
print 'WRITER: Unhandled node', node
def _append_version(self, node, attrs):
if node.version:
attrs.append(('version', node.version))
def _write_generic(self, node):
for key, value in node.attributes.items():
self.write_tag('attribute', [('name', key), ('value', value)])
if hasattr(node, 'doc') and node.doc:
self.write_tag('doc', [('xml:space', 'preserve')],
node.doc)
if hasattr(node, 'version_doc') and node.version_doc:
self.write_tag('doc-version', [('xml:space', 'preserve')],
node.version_doc)
if hasattr(node, 'deprecated_doc') and node.deprecated_doc:
self.write_tag('doc-deprecated', [('xml:space', 'preserve')],
node.deprecated_doc)
if hasattr(node, 'stability_doc') and node.stability_doc:
self.write_tag('doc-stability', [('xml:space', 'preserve')],
node.stability_doc)
def _append_node_generic(self, node, attrs):
if node.skip or not node.introspectable:
attrs.append(('introspectable', '0'))
if node.deprecated or node.deprecated_doc:
# The deprecated attribute used to contain node.deprecated_doc as an attribute. As
# an xml attribute cannot preserve whitespace, deprecated_doc has been moved into
            # its own tag, written in _write_generic() above. We continue to write the deprecated
# attribute for backwards compatibility
attrs.append(('deprecated', '1'))
if node.deprecated:
attrs.append(('deprecated-version', node.deprecated))
if node.stability:
attrs.append(('stability', node.stability))
def _append_throws(self, func, attrs):
if func.throws:
attrs.append(('throws', '1'))
def _write_alias(self, alias):
attrs = [('name', alias.name)]
if alias.ctype is not None:
attrs.append(('c:type', alias.ctype))
self._append_node_generic(alias, attrs)
with self.tagcontext('alias', attrs):
self._write_generic(alias)
self._write_type_ref(alias.target)
def _write_callable(self, callable, tag_name, extra_attrs):
attrs = [('name', callable.name)]
attrs.extend(extra_attrs)
self._append_version(callable, attrs)
self._append_node_generic(callable, attrs)
self._append_throws(callable, attrs)
with self.tagcontext(tag_name, attrs):
self._write_generic(callable)
self._write_return_type(callable.retval, parent=callable)
self._write_parameters(callable)
def _write_function(self, func, tag_name='function'):
if func.internal_skipped:
return
attrs = []
if hasattr(func, 'symbol'):
attrs.append(('c:identifier', func.symbol))
if func.shadowed_by:
attrs.append(('shadowed-by', func.shadowed_by))
elif func.shadows:
attrs.append(('shadows', func.shadows))
if func.moved_to is not None:
attrs.append(('moved-to', func.moved_to))
self._write_callable(func, tag_name, attrs)
def _write_method(self, method):
self._write_function(method, tag_name='method')
def _write_static_method(self, method):
self._write_function(method, tag_name='function')
def _write_constructor(self, method):
self._write_function(method, tag_name='constructor')
def _write_return_type(self, return_, parent=None):
if not return_:
return
attrs = []
if return_.transfer:
attrs.append(('transfer-ownership', return_.transfer))
if return_.skip:
attrs.append(('skip', '1'))
if return_.nullable:
attrs.append(('nullable', '1'))
with self.tagcontext('return-value', attrs):
self._write_generic(return_)
self._write_type(return_.type, parent=parent)
def _write_parameters(self, callable):
if not callable.parameters and callable.instance_parameter is None:
return
with self.tagcontext('parameters'):
if callable.instance_parameter:
self._write_parameter(callable, callable.instance_parameter, 'instance-parameter')
for parameter in callable.parameters:
self._write_parameter(callable, parameter)
def _write_parameter(self, parent, parameter, nodename='parameter'):
attrs = []
if parameter.argname is not None:
attrs.append(('name', parameter.argname))
if (parameter.direction is not None) and (parameter.direction != 'in'):
attrs.append(('direction', parameter.direction))
attrs.append(('caller-allocates',
'1' if parameter.caller_allocates else '0'))
if parameter.transfer:
attrs.append(('transfer-ownership',
parameter.transfer))
if parameter.nullable:
attrs.append(('nullable', '1'))
if parameter.direction != ast.PARAM_DIRECTION_OUT:
attrs.append(('allow-none', '1'))
if parameter.optional:
attrs.append(('optional', '1'))
if parameter.direction == ast.PARAM_DIRECTION_OUT:
attrs.append(('allow-none', '1'))
if parameter.scope:
attrs.append(('scope', parameter.scope))
if parameter.closure_name is not None:
idx = parent.get_parameter_index(parameter.closure_name)
attrs.append(('closure', '%d' % (idx, )))
if parameter.destroy_name is not None:
idx = parent.get_parameter_index(parameter.destroy_name)
attrs.append(('destroy', '%d' % (idx, )))
if parameter.skip:
attrs.append(('skip', '1'))
with self.tagcontext(nodename, attrs):
self._write_generic(parameter)
self._write_type(parameter.type, parent=parent)
def _type_to_name(self, typeval):
if not typeval.resolved:
raise AssertionError("Caught unresolved type %r (ctype=%r)" % (typeval, typeval.ctype))
assert typeval.target_giname is not None
prefix = self._namespace.name + '.'
if typeval.target_giname.startswith(prefix):
return typeval.target_giname[len(prefix):]
return typeval.target_giname
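    # _type_to_name strips the local namespace prefix: while writing namespace
    # "Gtk", the giname "Gtk.Widget" becomes "Widget", whereas a foreign name
    # such as "GLib.Variant" is returned unchanged.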
def _write_type_ref(self, ntype):
""" Like _write_type, but only writes the type name rather than the full details """
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
else:
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
attrs.insert(0, ('name', ntype.target_fundamental))
self.write_tag('type', attrs)
def _write_type(self, ntype, relation=None, parent=None):
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.complete_ctype:
attrs.append(('c:type', ntype.complete_ctype))
elif ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Varargs):
self.write_tag('varargs', [])
elif isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
# we insert an explicit 'zero-terminated' attribute
# when it is false, or when it would not be implied
# by the absence of length and fixed-size
if not ntype.zeroterminated:
attrs.insert(0, ('zero-terminated', '0'))
elif (ntype.zeroterminated
and (ntype.size is not None or ntype.length_param_name is not None)):
attrs.insert(0, ('zero-terminated', '1'))
if ntype.size is not None:
attrs.append(('fixed-size', '%d' % (ntype.size, )))
if ntype.length_param_name is not None:
if isinstance(parent, ast.Callable):
length = parent.get_parameter_index(ntype.length_param_name)
elif isinstance(parent, ast.Compound):
length = parent.get_field_index(ntype.length_param_name)
else:
assert False, "parent not a callable or compound: %r" % parent
attrs.insert(0, ('length', '%d' % (length, )))
with self.tagcontext('array', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
with self.tagcontext('type', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
with self.tagcontext('type', attrs):
self._write_type(ntype.key_type)
self._write_type(ntype.value_type)
else:
# REWRITEFIXME - enable this for 1.2
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
# attrs = [('fundamental', ntype.target_fundamental)]
attrs.insert(0, ('name', ntype.target_fundamental))
elif ntype.target_foreign:
attrs.insert(0, ('foreign', '1'))
self.write_tag('type', attrs)
def _append_registered(self, node, attrs):
assert isinstance(node, ast.Registered)
if node.get_type:
attrs.extend([('glib:type-name', node.gtype_name),
('glib:get-type', node.get_type)])
def _write_enum(self, enum):
attrs = [('name', enum.name)]
self._append_version(enum, attrs)
self._append_node_generic(enum, attrs)
self._append_registered(enum, attrs)
attrs.append(('c:type', enum.ctype))
if enum.error_domain:
attrs.append(('glib:error-domain', enum.error_domain))
with self.tagcontext('enumeration', attrs):
self._write_generic(enum)
for member in enum.members:
self._write_member(member)
for method in sorted(enum.static_methods):
self._write_static_method(method)
def _write_bitfield(self, bitfield):
attrs = [('name', bitfield.name)]
self._append_version(bitfield, attrs)
self._append_node_generic(bitfield, attrs)
self._append_registered(bitfield, attrs)
attrs.append(('c:type', bitfield.ctype))
with self.tagcontext('bitfield', attrs):
self._write_generic(bitfield)
for member in bitfield.members:
self._write_member(member)
for method in sorted(bitfield.static_methods):
self._write_static_method(method)
def _write_member(self, member):
attrs = [('name', member.name),
('value', str(member.value)),
('c:identifier', member.symbol)]
if member.nick is not None:
attrs.append(('glib:nick', member.nick))
with self.tagcontext('member', attrs):
self._write_generic(member)
def _write_doc_section(self, doc_section):
attrs = [('name', doc_section.name)]
with self.tagcontext('docsection', attrs):
self._write_generic(doc_section)
def _write_constant(self, constant):
attrs = [('name', constant.name),
('value', constant.value),
('c:type', constant.ctype)]
self._append_version(constant, attrs)
self._append_node_generic(constant, attrs)
with self.tagcontext('constant', attrs):
self._write_generic(constant)
self._write_type(constant.value_type)
def _write_class(self, node):
attrs = [('name', node.name),
('c:symbol-prefix', node.c_symbol_prefix),
('c:type', node.ctype)]
self._append_version(node, attrs)
self._append_node_generic(node, attrs)
if isinstance(node, ast.Class):
tag_name = 'class'
if node.parent_type is not None:
attrs.append(('parent',
self._type_to_name(node.parent_type)))
if node.is_abstract:
attrs.append(('abstract', '1'))
else:
assert isinstance(node, ast.Interface)
tag_name = 'interface'
attrs.append(('glib:type-name', node.gtype_name))
if node.get_type is not None:
attrs.append(('glib:get-type', node.get_type))
if node.glib_type_struct is not None:
attrs.append(('glib:type-struct',
self._type_to_name(node.glib_type_struct)))
if isinstance(node, ast.Class):
if node.fundamental:
attrs.append(('glib:fundamental', '1'))
if node.ref_func:
attrs.append(('glib:ref-func', node.ref_func))
if node.unref_func:
attrs.append(('glib:unref-func', node.unref_func))
if node.set_value_func:
attrs.append(('glib:set-value-func', node.set_value_func))
if node.get_value_func:
attrs.append(('glib:get-value-func', node.get_value_func))
with self.tagcontext(tag_name, attrs):
self._write_generic(node)
if isinstance(node, ast.Class):
for iface in sorted(node.interfaces):
self.write_tag('implements',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Interface):
for iface in sorted(node.prerequisites):
self.write_tag('prerequisite',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Class):
for method in sorted(node.constructors):
self._write_constructor(method)
for method in sorted(node.static_methods):
self._write_static_method(method)
for vfunc in sorted(node.virtual_methods):
self._write_vfunc(vfunc)
for method in sorted(node.methods):
self._write_method(method)
for prop in sorted(node.properties):
self._write_property(prop)
for field in node.fields:
self._write_field(field, node)
for signal in sorted(node.signals):
self._write_signal(signal)
def _write_boxed(self, boxed):
attrs = [('glib:name', boxed.name)]
if boxed.c_symbol_prefix is not None:
attrs.append(('c:symbol-prefix', boxed.c_symbol_prefix))
self._append_registered(boxed, attrs)
with self.tagcontext('glib:boxed', attrs):
self._write_generic(boxed)
for method in sorted(boxed.constructors):
self._write_constructor(method)
for method in sorted(boxed.methods):
self._write_method(method)
for method in sorted(boxed.static_methods):
self._write_static_method(method)
def _write_property(self, prop):
attrs = [('name', prop.name)]
self._append_version(prop, attrs)
self._append_node_generic(prop, attrs)
# Properties are assumed to be readable (see also generate.c)
if not prop.readable:
attrs.append(('readable', '0'))
if prop.writable:
attrs.append(('writable', '1'))
if prop.construct:
attrs.append(('construct', '1'))
if prop.construct_only:
attrs.append(('construct-only', '1'))
if prop.transfer:
attrs.append(('transfer-ownership', prop.transfer))
with self.tagcontext('property', attrs):
self._write_generic(prop)
self._write_type(prop.type)
def _write_vfunc(self, vf):
attrs = []
if vf.invoker:
attrs.append(('invoker', vf.invoker))
self._write_callable(vf, 'virtual-method', attrs)
def _write_callback(self, callback):
attrs = []
if callback.ctype != callback.name:
attrs.append(('c:type', callback.ctype))
self._write_callable(callback, 'callback', attrs)
def _write_record(self, record, extra_attrs=[]):
is_gtype_struct = False
attrs = list(extra_attrs)
if record.name is not None:
attrs.append(('name', record.name))
if record.ctype is not None: # the record might be anonymous
attrs.append(('c:type', record.ctype))
if record.disguised:
attrs.append(('disguised', '1'))
if record.foreign:
attrs.append(('foreign', '1'))
if record.is_gtype_struct_for is not None:
is_gtype_struct = True
attrs.append(('glib:is-gtype-struct-for',
self._type_to_name(record.is_gtype_struct_for)))
self._append_version(record, attrs)
self._append_node_generic(record, attrs)
self._append_registered(record, attrs)
if record.c_symbol_prefix:
attrs.append(('c:symbol-prefix', record.c_symbol_prefix))
with self.tagcontext('record', attrs):
self._write_generic(record)
if record.fields:
for field in record.fields:
self._write_field(field, record, is_gtype_struct)
for method in sorted(record.constructors):
self._write_constructor(method)
for method in sorted(record.methods):
self._write_method(method)
for method in sorted(record.static_methods):
self._write_static_method(method)
def _write_union(self, union):
attrs = []
if union.name is not None:
attrs.append(('name', union.name))
if union.ctype is not None: # the union might be anonymous
attrs.append(('c:type', union.ctype))
self._append_version(union, attrs)
self._append_node_generic(union, attrs)
self._append_registered(union, attrs)
if union.c_symbol_prefix:
attrs.append(('c:symbol-prefix', union.c_symbol_prefix))
with self.tagcontext('union', attrs):
self._write_generic(union)
if union.fields:
for field in union.fields:
self._write_field(field, union)
for method in sorted(union.constructors):
self._write_constructor(method)
for method in sorted(union.methods):
self._write_method(method)
for method in sorted(union.static_methods):
self._write_static_method(method)
def _write_field(self, field, parent, is_gtype_struct=False):
if field.anonymous_node:
if isinstance(field.anonymous_node, ast.Callback):
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
with self.tagcontext('field', attrs):
self._write_callback(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Record):
self._write_record(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Union):
self._write_union(field.anonymous_node)
else:
raise AssertionError("Unknown field anonymous: %r" % (field.anonymous_node, ))
else:
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
# Fields are assumed to be read-only
# (see also girparser.c and generate.c)
if not field.readable:
attrs.append(('readable', '0'))
if field.writable:
attrs.append(('writable', '1'))
if field.bits:
attrs.append(('bits', str(field.bits)))
if field.private:
attrs.append(('private', '1'))
with self.tagcontext('field', attrs):
self._write_generic(field)
self._write_type(field.type, parent=parent)
def _write_signal(self, signal):
attrs = [('name', signal.name)]
if signal.when:
attrs.append(('when', signal.when))
if signal.no_recurse:
attrs.append(('no-recurse', '1'))
if signal.detailed:
attrs.append(('detailed', '1'))
if signal.action:
attrs.append(('action', '1'))
if signal.no_hooks:
attrs.append(('no-hooks', '1'))
self._append_version(signal, attrs)
self._append_node_generic(signal, attrs)
with self.tagcontext('glib:signal', attrs):
self._write_generic(signal)
self._write_return_type(signal.retval)
self._write_parameters(signal)
| gpl-2.0 |
Serag8/Bachelor | google_appengine/google/appengine/ext/admin_redirect/main.py | 8 | 2616 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Main module for admin redirect.
To use, add this to app.yaml:
builtins:
- admin_redirect: on
"""
import logging
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
GOOGLE_SUFFIX = '.google.com'
CONSOLE_SUFFIX = '/dashboard?app_id='
APPENGINE_URL = 'https://appengine.google.com'
ADMIN_CONSOLE_NAME = 'admin-console'
APPLICATION_ID_PARAM = 'APPLICATION_ID'
SERVER_NAME_PARAM = 'SERVER_NAME'
class RedirectToAdminConsole(webapp.RequestHandler):
"""Used to redirect the user to the appropriate Admin Console URL."""
def get(self):
"""Handler to redirect all /_ah/admin.* requests to Admin Console."""
app_id = self.request.environ.get(APPLICATION_ID_PARAM)
if not app_id:
logging.error('Could not get application id; generic redirect.')
self.redirect(APPENGINE_URL)
return
server = self.request.environ.get(SERVER_NAME_PARAM)
if not server:
logging.warning('Server parameter not present; appengine.com redirect.')
self.redirect('%s%s%s' % (APPENGINE_URL, CONSOLE_SUFFIX, app_id))
return
if server.endswith(GOOGLE_SUFFIX):
if server.find(app_id) == 0:
new_server = server.replace(app_id, ADMIN_CONSOLE_NAME)
self.redirect('http://%s%s%s' % (new_server,
CONSOLE_SUFFIX,
app_id))
else:
self.response.out.write("""
Could not determine admin console location from server name.""")
else:
self.redirect('%s%s%s' % (APPENGINE_URL, CONSOLE_SUFFIX, app_id))
def CreateApplication():
"""Create new WSGIApplication and register all handlers.
Returns:
    an instance of webapp.WSGIApplication with all admin redirect handlers
registered.
"""
return webapp.WSGIApplication([(r'.*', RedirectToAdminConsole)],
debug=True)
APP = CreateApplication()
def main():
util.run_wsgi_app(APP)
if __name__ == '__main__':
main()
| mit |
helium/helium-client-python | setup.py | 1 | 2330 | #!/usr/bin/env python
"""
setup.py file for helium-client-python
"""
from setuptools import setup, find_packages
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext
import codecs
import versioneer
def get_ext_modules():
local_inc = 'helium_client/helium-client'
local_sources = ['helium_client/_helium.c',
'helium_client/_serial.c',
'helium_client/helium-client/helium-client.c',
'helium_client/helium-client/cauterize/atom_api.c',
'helium_client/helium-client/cauterize/config_api.c',
'helium_client/helium-client/cauterize/cauterize.c']
extra_compile_args = ['-std=gnu99', '-Werror']
return [
Extension(name="helium_client._helium",
sources=local_sources,
include_dirs=[local_inc],
extra_compile_args=extra_compile_args),
]
cmdclass = {'build_ext' : build_ext}
cmdclass.update(versioneer.get_cmdclass())
setup(
name='helium_client',
version=versioneer.get_version(),
author='Helium',
author_email='[email protected]',
packages=find_packages(),
license='LICENSE.txt',
url="http://github.com/helium/helium-client-python",
description='A Python interface to the Helium Atom.',
long_description=codecs.open('README.md',
mode='r', encoding='utf-8').read(),
classifiers=['Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Development Status :: 3 - Alpha',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development'],
extras_require={'dev': ['cython']},
include_package_data=True,
ext_modules=get_ext_modules(),
cmdclass=cmdclass,
)
| bsd-3-clause |
MyAOSP/external_chromium_org | ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_component_crx_gen.py | 48 | 13105 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script lays out the PNaCl translator files for a
normal Chrome installer, for one platform. Once run num-of-arches times,
the result can then be packed into a multi-CRX zip file.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import json
import logging
import optparse
import os
import platform
import re
import shutil
import sys
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also use this to identify the arch-specific versions of
# this extension.
def CanonicalArch(arch):
if arch in ('x86_64', 'x86-64', 'x64', 'amd64'):
return 'x86-64'
# TODO(jvoung): be more specific about the arm architecture version?
if arch in ('arm', 'armv7'):
return 'arm'
if re.match('^i.86$', arch) or arch in ('x86_32', 'x86-32', 'ia32', 'x86'):
return 'x86-32'
return None
def GetBuildArch():
arch = platform.machine()
return CanonicalArch(arch)
BUILD_ARCH = GetBuildArch()
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
return arch in ARCHES
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
return {'x86-32': 'i686',
'x86-64': 'x86_64',
'arm' : 'armv7'}[arch]
######################################################################
def GetNaClRoot():
""" Find the native_client path, relative to this script.
This script is in ppapi/... and native_client is a sibling of ppapi.
"""
script_file = os.path.abspath(__file__)
def SearchForNaCl(cur_dir):
if cur_dir.endswith('ppapi'):
parent = os.path.dirname(cur_dir)
sibling = os.path.join(parent, 'native_client')
if not os.path.isdir(sibling):
raise Exception('Could not find native_client relative to %s' %
script_file)
return sibling
    # Detect when we've hit the root (linux is /, but windows is not...)
next_dir = os.path.dirname(cur_dir)
if cur_dir == next_dir:
raise Exception('Could not find native_client relative to %s' %
script_file)
return SearchForNaCl(next_dir)
return SearchForNaCl(script_file)
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
if sys.platform == 'darwin':
platform = 'mac'
elif sys.platform.startswith('linux'):
platform = 'linux'
elif sys.platform in ('cygwin', 'win32'):
platform = 'windows'
else:
raise Exception('Unknown platform: %s' % sys.platform)
return platform
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
arch = CanonicalArch(target_arch)
if not IsValidArch(arch):
raise Exception('Unknown target_arch %s' % target_arch)
# On windows, we need x86-32 and x86-64 (assuming non-windows RT).
if BUILD_PLATFORM == 'windows':
if arch.startswith('x86'):
return ['x86-32', 'x86-64']
else:
raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
target_arch)
else:
return [arch]
######################################################################
class PnaclPackaging(object):
package_base = os.path.dirname(__file__)
# File paths that are set from the command line.
pnacl_template = None
tool_revisions = None
# Agreed-upon name for pnacl-specific info.
pnacl_json = 'pnacl.json'
@staticmethod
def SetPnaclInfoTemplatePath(path):
PnaclPackaging.pnacl_template = path
@staticmethod
def SetToolsRevisionPath(path):
PnaclPackaging.tool_revisions = path
@staticmethod
def PnaclToolsRevision():
with open(PnaclPackaging.tool_revisions, 'r') as f:
for line in f.read().splitlines():
if line.startswith('PNACL_VERSION'):
_, version = line.split('=')
# CWS happens to use version quads, so make it a quad too.
# However, each component of the quad is limited to 64K max.
# Try to handle a bit more.
max_version = 2 ** 16
version = int(version)
version_more = version / max_version
version = version % max_version
return '0.1.%d.%d' % (version_more, version)
raise Exception('Cannot find PNACL_VERSION in TOOL_REVISIONS file: %s' %
PnaclPackaging.tool_revisions)
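  # For example, a PNACL_VERSION of 81000 becomes '0.1.1.15464', since
  # 81000 / 2**16 == 1 and 81000 % 2**16 == 15464.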
@staticmethod
def GeneratePnaclInfo(target_dir, abi_version, arch):
# A note on versions: pnacl_version is the version of translator built
# by the NaCl repo, while abi_version is bumped when the NaCl sandbox
# actually changes.
pnacl_version = PnaclPackaging.PnaclToolsRevision()
with open(PnaclPackaging.pnacl_template, 'r') as pnacl_template_fd:
pnacl_template = json.load(pnacl_template_fd)
out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
None))
with open(out_name, 'w') as output_fd:
pnacl_template['pnacl-arch'] = arch
pnacl_template['pnacl-version'] = pnacl_version
json.dump(pnacl_template, output_fd, sort_keys=True, indent=4)
######################################################################
class PnaclDirs(object):
toolchain_dir = J(NACL_ROOT, 'toolchain')
output_dir = J(toolchain_dir, 'pnacl-package')
@staticmethod
def TranslatorRoot():
return J(PnaclDirs.toolchain_dir, 'pnacl_translator')
@staticmethod
def LibDir(target_arch):
return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)
@staticmethod
def SandboxedCompilerDir(target_arch):
return J(PnaclDirs.toolchain_dir,
'pnacl_translator', StandardArch(target_arch), 'bin')
@staticmethod
def SetOutputDir(d):
PnaclDirs.output_dir = d
@staticmethod
def OutputDir():
return PnaclDirs.output_dir
@staticmethod
def OutputAllDir(version_quad):
return J(PnaclDirs.OutputDir(), version_quad)
@staticmethod
def OutputArchBase(arch):
return '%s' % arch
@staticmethod
def OutputArchDir(arch):
# Nest this in another directory so that the layout will be the same
# as the "all"/universal version.
parent_dir = J(PnaclDirs.OutputDir(), PnaclDirs.OutputArchBase(arch))
return (parent_dir, J(parent_dir, PnaclDirs.OutputArchBase(arch)))
######################################################################
def StepBanner(short_desc, long_desc):
logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
out_dir = PnaclDirs.OutputDir()
StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
else:
logging.info('Clean skipped -- no previous output directory!')
######################################################################
def UseWhitelistedChars(orig_basename, arch):
""" Make the filename match the pattern expected by nacl_file_host.
  Currently, this assumes there is a prefix "pnacl_public_" and
that the allowed chars are in the set [a-zA-Z0-9_].
"""
if arch:
target_basename = 'pnacl_public_%s_%s' % (arch, orig_basename)
else:
target_basename = 'pnacl_public_%s' % orig_basename
result = re.sub(r'[^a-zA-Z0-9_]', '_', target_basename)
logging.info('UseWhitelistedChars using: %s' % result)
return result
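# For example, UseWhitelistedChars('libpnacl_irt_shim.a', 'x86-64') (an
# illustrative file name) returns 'pnacl_public_x86_64_libpnacl_irt_shim_a'.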
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
""" Copy files from src_dir to dest_dir.
When copying, also rename the files such that they match the white-listing
pattern in chrome/browser/nacl_host/nacl_file_host.cc.
"""
for (root, dirs, files) in os.walk(src_dir, followlinks=True):
for f in files:
# Assume a flat directory.
assert (f == os.path.basename(f))
full_name = J(root, f)
target_name = UseWhitelistedChars(f, arch)
shutil.copy(full_name, J(dest_dir, target_name))
def BuildArchForInstaller(version_quad, arch, lib_overrides):
""" Build an architecture specific version for the chrome installer.
"""
target_dir = PnaclDirs.OutputDir()
StepBanner('BUILD INSTALLER',
'Packaging for arch %s in %s' % (arch, target_dir))
# Copy llc.nexe and ld.nexe, but with some renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.SandboxedCompilerDir(arch),
arch,
target_dir)
# Copy native libraries, also with renaming and directory flattening.
CopyFlattenDirsAndPrefix(PnaclDirs.LibDir(arch), arch, target_dir)
# Also copy files from the list of overrides.
# This needs the arch tagged onto the name too, like the other files.
if arch in lib_overrides:
for override in lib_overrides[arch]:
override_base = os.path.basename(override)
target_name = UseWhitelistedChars(override_base, arch)
shutil.copy(override, J(target_dir, target_name))
def BuildInstallerStyle(version_quad, lib_overrides, arches):
""" Package the pnacl component for use within the chrome installer
infrastructure. These files need to be named in a special way
so that white-listing of files is easy.
"""
StepBanner("BUILD_ALL", "Packaging installer for version: %s" % version_quad)
for arch in arches:
BuildArchForInstaller(version_quad, arch, lib_overrides)
# Generate pnacl info manifest.
# Hack around the fact that there may be more than one arch, on Windows.
if len(arches) == 1:
arches = arches[0]
PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad, arches)
######################################################################
def Main():
usage = 'usage: %prog [options] version_arg'
parser = optparse.OptionParser(usage)
# We may want to accept a target directory to dump it in the usual
# output directory (e.g., scons-out).
parser.add_option('-c', '--clean', dest='clean',
action='store_true', default=False,
help='Clean out destination directory first.')
parser.add_option('-d', '--dest', dest='dest',
help='The destination root for laying out the extension')
parser.add_option('-L', '--lib_override',
dest='lib_overrides', action='append', default=[],
help='Specify path to a fresher native library ' +
'that overrides the tarball library with ' +
'(arch:libfile) tuple.')
parser.add_option('-t', '--target_arch',
dest='target_arch', default=None,
help='Only generate the chrome installer version for arch')
parser.add_option('--info_template_path',
dest='info_template_path', default=None,
help='Path of the info template file')
parser.add_option('--tool_revisions_path', dest='tool_revisions_path',
default=None, help='Location of NaCl TOOL_REVISIONS file.')
parser.add_option('-v', '--verbose', dest='verbose', default=False,
action='store_true',
help='Print verbose debug messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
% (options, args))
# Set destination directory before doing any cleaning, etc.
if options.dest:
PnaclDirs.SetOutputDir(options.dest)
if options.clean:
Clean()
if options.info_template_path:
PnaclPackaging.SetPnaclInfoTemplatePath(options.info_template_path)
if options.tool_revisions_path:
PnaclPackaging.SetToolsRevisionPath(options.tool_revisions_path)
lib_overrides = {}
for o in options.lib_overrides:
arch, override_lib = o.split(',')
arch = CanonicalArch(arch)
if not IsValidArch(arch):
raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
if not os.path.isfile(override_lib):
raise Exception('Override native lib not a file for -L: %s (from %s)' %
(override_lib, o))
override_list = lib_overrides.get(arch, [])
override_list.append(override_lib)
lib_overrides[arch] = override_list
if len(args) != 1:
parser.print_help()
parser.error('Incorrect number of arguments')
abi_version = int(args[0])
arches = DetermineInstallerArches(options.target_arch)
BuildInstallerStyle(abi_version, lib_overrides, arches)
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
xeoron/namebench | nb_third_party/jinja2/tests.py | 285 | 3313 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
# nose, nothing here to test
__test__ = False
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
try:
test_callable = callable
except NameError:
def test_callable(x):
return hasattr(x, '__call__')
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return unicode(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return unicode(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, basestring)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, long, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped
}
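# Within a template these are applied with the "is" operator, for example
# {{ 42 is divisibleby(7) }} or {% if value is defined %}.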
| apache-2.0 |
bukepo/openthread | tests/scripts/thread-cert/pktverify/verify.py | 7 | 3121 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import importlib.util
import inspect
import json
import logging
import os
import sys
THREAD_CERT_DIR = './tests/scripts/thread-cert'
sys.path.append(THREAD_CERT_DIR)
import thread_cert
from pktverify.packet_verifier import PacketVerifier
logging.basicConfig(level=logging.INFO,
format='File "%(pathname)s", line %(lineno)d, in %(funcName)s\n'
'%(asctime)s - %(levelname)s - %(message)s')
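# Usage (sketch): verify.py <test_info.json>, where the JSON file records the
# original test "script" path plus the data consumed by PacketVerifier.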
def main():
json_file = sys.argv[1]
with open(json_file, 'rt') as fp:
test_info = json.load(fp)
script = test_info['script']
script = os.path.relpath(script, THREAD_CERT_DIR)
module_name = os.path.splitext(script)[0].replace('/', '.')
logging.info("Loading %s as module %s ...", script, module_name)
spec = importlib.util.spec_from_file_location(module_name, os.path.join(THREAD_CERT_DIR, script))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
test_class = None
for name, member in inspect.getmembers(mod):
if isinstance(member, type) and issubclass(member, thread_cert.TestCase):
assert test_class is None, (test_class, member)
test_class = member
assert test_class is not None, "can not find a test class in %s" % script
test_instance = test_class()
pv = PacketVerifier(json_file)
pv.add_common_vars()
test_instance.verify(pv)
print("Packet verification passed: %s" % json_file, file=sys.stderr)
if __name__ == '__main__':
main()
| bsd-3-clause |
KousikaGanesh/purchaseandInventory | openerp/addons/mrp_repair/__openerp__.py | 35 | 2497 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Repairs Management',
'version': '1.0',
'category': 'Manufacturing',
'description': """
The aim is to have a complete module to manage all product repairs.
====================================================================
The following topics should be covered by this module:
------------------------------------------------------
    * Add/remove products in the repair
    * Impact on stocks
* Invoicing (products and/or services)
* Warranty concept
* Repair quotation report
* Notes for the technician and for the final customer
""",
'author': 'OpenERP SA',
'images': ['images/repair_order.jpeg'],
'depends': ['mrp', 'sale', 'account'],
'data': [
'security/ir.model.access.csv',
'security/mrp_repair_security.xml',
'mrp_repair_data.xml',
'mrp_repair_sequence.xml',
'wizard/mrp_repair_cancel_view.xml',
'wizard/mrp_repair_make_invoice_view.xml',
'mrp_repair_view.xml',
'mrp_repair_workflow.xml',
'mrp_repair_report.xml',
],
'demo': ['mrp_repair_demo.yml'],
'test': ['test/test_mrp_repair_noneinv.yml',
'test/test_mrp_repair_b4inv.yml',
'test/test_mrp_repair_afterinv.yml',
'test/test_mrp_repair_cancel.yml',
'test/mrp_repair_report.yml',
'test/test_mrp_repair_fee.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
datamade/openelections-core | openelex/us/ia/transform.py | 2 | 1161 | from openelex.base.transform import Transform, registry
from openelex.models import RawResult
class FixVanBurenTransform(Transform):
"""
s/VanBuren/Van Buren in RawResults from the source file
20001107__ia__general__state_senate__county.csv
"""
name = 'fix_van_buren'
def __call__(self):
results = RawResult.objects.filter(
source="20001107__ia__general__state_senate__county.csv",
jurisdiction="VanBuren")
msg = "Changing 'VanBuren' to 'Van Buren' in {} raw results.".format(
results.count())
print(msg)
results.update(set__jurisdiction="Van Buren",
set__ocd_id="ocd-division/country:us/state:ia/county:van_buren")
def reverse(self):
results = RawResult.objects.filter(
source="20001107__ia__general__state_senate__county.csv",
jurisdiction="Van Buren")
msg = "Reverting 'Van Buren' to 'VanBuren' in {} raw results".format(
results.count())
print(msg)
results.update(set__jurisdiction="VanBuren", set__ocd_id="")
registry.register('ia', FixVanBurenTransform, raw=True)
| mit |
sumanthns/flask-project | flask_project/templates/factory.py | 1 | 2162 | from flask import Flask
from werkzeug.utils import import_string
class NoBlueprintException(Exception):
pass
class NoRouteModuleException(Exception):
pass
def _get_imported_stuff_by_path(path):
module_name, object_name = path.rsplit('.', 1)
module = import_string(module_name)
return module, object_name
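# For example, _get_imported_stuff_by_path('app.views.home_routes') imports the
# 'app.views' module and returns it together with the attribute name
# 'home_routes' (an illustrative dotted path).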
class AppFactory(object):
def __init__(self, config, name):
self.config = config
self.name = name
def _build_app(self):
app = Flask(self.name)
self._add_config(app)
self._init_db(app)
self._register_blueprints(app)
self._register_routes(app)
return app
def _add_config(self, app):
app.config.from_object(self.config)
def _init_db(self, app):
from app import db
db.init_app(app)
def get_app(self):
app = self._build_app()
return app
def _register_blueprints(self, app):
self._bp = {}
for blueprint_path in app.config.get('BLUEPRINTS', []):
module, b_name = \
_get_imported_stuff_by_path(blueprint_path)
if hasattr(module, b_name):
app.register_blueprint(getattr(module, b_name))
else:
raise NoBlueprintException(
'No {bp_name} blueprint found'.format(bp_name=b_name))
def _register_routes(self, app):
for url_module in app.config.get('URL_MODULES', []):
module, r_name = _get_imported_stuff_by_path(url_module)
if hasattr(module, r_name):
self._setup_routes(getattr(module, r_name), app)
else:
raise NoRouteModuleException('No {r_name} url module found'.format(r_name=r_name))
def _setup_routes(self, routes, app):
for route in routes:
blueprint, rules = route[0], route[1:]
for pattern, view in rules:
if isinstance(blueprint, tuple):
blueprint = blueprint[0]
blueprint.add_url_rule(pattern, view_func=view)
if blueprint not in app.blueprints:
app.register_blueprint(blueprint)
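# Typical usage (sketch; the config object and its attributes are illustrative):
#   app = AppFactory(config='settings.DevConfig', name=__name__).get_app()
# where the config defines BLUEPRINTS and URL_MODULES as lists of dotted paths.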
| mit |
cloudmesh/cmd3light | cloudmesh_cmd3light/console.py | 1 | 3669 | import textwrap
from colorama import Fore, Back, Style
import colorama
colorama.init()
class Console(object):
"""
A simple way to print in a console terminal in color. Instead of using
simply the print statement you can use special methods to indicate
warnings, errors, ok and regular messages.
Example Usage::
Console.warning("Warning")
Console.error("Error")
Console.info("Info")
Console.msg("msg")
Console.ok("Success")
    One can switch the color mode off with::
Console.color = False
Console.error("Error")
The color will be switched on by default.
"""
#
# TODO: It would be good if the Console uses the borg pattern to have a
# global switch for the console color mode. Currently each import
# switches it back to color.
#
color = True
theme_color = {
'HEADER': Fore.MAGENTA,
'BLACK': Fore.RED,
'CYAN': Fore.CYAN,
'WHITE': Fore.WHITE,
'BLUE': Fore.BLUE,
'OKBLUE': Fore.BLUE,
'OKGREEN': Fore.GREEN,
'FAIL': Fore.RED,
'WARNING': Fore.MAGENTA,
'RED': Fore.RED,
'ENDC': '\033[0m',
'BOLD': "\033[1m",
'OK': Fore.GREEN,
}
theme_bw = {
'HEADER': '',
'BLACK': '',
'CYAN': '',
'WHITE': '',
'BLUE': '',
'OKBLUE': '',
'OKGREEN': '',
'FAIL': '',
'WARNING': '',
'RED': '',
'ENDC': '',
'BOLD': "",
'OK': "",
}
theme = theme_color
@staticmethod
def set_theme(color=True):
if color:
Console.theme = Console.theme_color
else:
Console.theme = Console.theme_bw
Console.color = color
@staticmethod
def get(name):
if name in Console.theme:
return Console.theme[name]
else:
return Console.theme['BLACK']
@staticmethod
def _msg(message, width=90):
return textwrap.fill(message, width=width)
@staticmethod
def msg(message):
print (message)
@staticmethod
def error(message, prefix=True):
if prefix:
text = "ERROR: "
else:
text = ""
if Console.color:
Console._print('FAIL', text, message)
else:
print Console._msg(text + message)
@staticmethod
def info(message):
if Console.color:
Console._print('OKBLUE', "INFO: ", message)
else:
print Console._msg("INFO: " + message)
@staticmethod
def warning(message):
if Console.color:
Console._print('WARNING', "WARNING: ", message)
else:
print Console._msg("WARNING: " + message)
@staticmethod
def ok(message):
if Console.color:
Console._print('OKGREEN', "", message)
else:
print Console._msg(message)
@staticmethod
def _print(color, prefix, message):
print (Console.theme[color] +
prefix +
Console._msg(message) +
Console.theme['ENDC'])
#
# Example
#
if __name__ == "__main__":
print Console.color
print Console.theme
Console.warning("Warning")
Console.error("Error")
Console.info("Info")
Console.msg("msg")
Console.ok("Success")
Console.color = False
print Console.color
Console.error("Error")
print(Fore.RED + 'some red text')
print(Back.GREEN + 'and with a green background')
print(Style.DIM + 'and in dim text')
print(Fore.RESET + Back.RESET + Style.RESET_ALL)
print('back to normal now')
| apache-2.0 |
jemofthewest/mykoans | python2/koans/about_asserts.py | 10 | 2289 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(False) # This should be true
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(False, "This should be true -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(__, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against
reality.
"""
expected_value = __
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = __
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert False
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
        # For example, contemplate the text string "naval". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
#
# So "naval".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(__, "naval".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# http://bit.ly/__class__
| mit |
darkleons/odoo | addons/hr_expense/report/hr_expense_report.py | 287 | 5652 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
_name = "hr.expense.report"
_description = "Expenses Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date ', readonly=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
'date_confirm': fields.date('Confirmation Date', readonly=True),
'date_valid': fields.date('Validation Date', readonly=True),
'department_id':fields.many2one('hr.department','Department', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Validation User', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'price_total':fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
'delay_valid':fields.float('Delay to Valid', readonly=True),
'delay_confirm':fields.float('Delay to Confirm', readonly=True),
'analytic_account': fields.many2one('account.analytic.account','Analytic account',readonly=True),
'price_average':fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
'nbr':fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'no_of_products':fields.integer('# of Products', readonly=True),
'no_of_account':fields.integer('# of Accounts', readonly=True),
'state': fields.selection([
('draft', 'Draft'),
('confirm', 'Waiting confirmation'),
('accepted', 'Accepted'),
('done', 'Done'),
('cancelled', 'Cancelled')],
'Status', readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_expense_report')
cr.execute("""
create or replace view hr_expense_report as (
select
min(l.id) as id,
s.date as date,
s.create_date as create_date,
s.employee_id,
s.journal_id,
s.currency_id,
s.date_confirm as date_confirm,
s.date_valid as date_valid,
s.user_valid as user_id,
s.department_id,
avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
l.product_id as product_id,
l.analytic_account as analytic_account,
sum(l.unit_quantity * u.factor) as product_qty,
s.company_id as company_id,
sum(l.unit_quantity*l.unit_amount) as price_total,
(sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
count(*) as nbr,
(select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
(select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
s.state
from hr_expense_line l
left join hr_expense_expense s on (s.id=l.expense_id)
left join product_uom u on (u.id=l.uom_id)
group by
s.date,
s.create_date,
s.date_confirm,
s.date_valid,
l.product_id,
l.analytic_account,
s.currency_id,
s.user_valid,
s.department_id,
l.uom_id,
l.id,
s.state,
s.journal_id,
s.company_id,
s.employee_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
thnee/ansible | lib/ansible/modules/cloud/vultr/vultr_dns_record.py | 21 | 10067 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_dns_record
short_description: Manages DNS records on Vultr.
description:
- Create, update and remove DNS records.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The record name (subrecord).
default: ""
aliases: [ subrecord ]
domain:
description:
- The domain the record is related to.
required: true
record_type:
description:
- Type of the record.
default: A
choices:
- A
- AAAA
- CNAME
- MX
- SRV
- CAA
- TXT
- NS
- SSHFP
aliases: [ type ]
data:
description:
- Data of the record.
- Required if C(state=present) or C(multiple=yes).
ttl:
description:
- TTL of the record.
default: 300
multiple:
description:
- Whether to use more than one record with similar C(name) including no name and C(record_type).
- Only allowed for a few record types, e.g. C(record_type=A), C(record_type=NS) or C(record_type=MX).
- C(data) will not be updated, instead it is used as a key to find existing records.
default: no
type: bool
priority:
description:
- Priority of the record.
default: 0
state:
description:
- State of the DNS record.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: Ensure an A record exists
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.10
ttl: 3600
- name: Ensure a second A record exists for round robin LB
vultr_dns_record:
name: www
domain: example.com
data: 10.10.10.11
ttl: 60
multiple: yes
- name: Ensure a CNAME record exists
vultr_dns_record:
name: web
record_type: CNAME
domain: example.com
data: www.example.com
- name: Ensure MX record exists
vultr_dns_record:
record_type: MX
domain: example.com
data: "{{ item.data }}"
priority: "{{ item.priority }}"
multiple: yes
with_items:
- { data: mx1.example.com, priority: 10 }
- { data: mx2.example.com, priority: 10 }
- { data: mx3.example.com, priority: 20 }
- name: Ensure a record is absent
local_action:
module: vultr_dns_record
name: www
domain: example.com
state: absent
- name: Ensure MX record is absent in case multiple exists
vultr_dns_record:
record_type: MX
domain: example.com
data: mx1.example.com
multiple: yes
state: absent
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_dns_record:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: The ID of the DNS record.
returned: success
type: int
sample: 1265277
name:
description: The name of the DNS record.
returned: success
type: str
sample: web
record_type:
description: The name of the DNS record.
returned: success
type: str
sample: web
data:
description: Data of the DNS record.
returned: success
type: str
sample: 10.10.10.10
domain:
description: Domain the DNS record is related to.
returned: success
type: str
sample: example.com
priority:
description: Priority of the DNS record.
returned: success
type: int
sample: 10
ttl:
description: Time to live of the DNS record.
returned: success
type: int
sample: 300
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
RECORD_TYPES = [
'A',
'AAAA',
'CNAME',
'MX',
'TXT',
'NS',
'SRV',
'CAA',
'SSHFP'
]
class AnsibleVultrDnsRecord(Vultr):
def __init__(self, module):
super(AnsibleVultrDnsRecord, self).__init__(module, "vultr_dns_record")
self.returns = {
'RECORDID': dict(key='id'),
'name': dict(),
'record': dict(),
'priority': dict(),
'data': dict(),
'type': dict(key='record_type'),
'ttl': dict(),
}
def get_record(self):
records = self.api_query(path="/v1/dns/records?domain=%s" % self.module.params.get('domain'))
multiple = self.module.params.get('multiple')
data = self.module.params.get('data')
name = self.module.params.get('name')
record_type = self.module.params.get('record_type')
result = {}
for record in records or []:
if record.get('type') != record_type:
continue
if record.get('name') == name:
if not multiple:
if result:
self.module.fail_json(msg="More than one record with record_type=%s and name=%s params. "
"Use multiple=yes for more than one record." % (record_type, name))
else:
result = record
elif record.get('data') == data:
return record
return result
def present_record(self):
record = self.get_record()
if not record:
record = self._create_record(record)
else:
record = self._update_record(record)
return record
def _create_record(self, record):
self.result['changed'] = True
data = {
'name': self.module.params.get('name'),
'domain': self.module.params.get('domain'),
'data': self.module.params.get('data'),
'type': self.module.params.get('record_type'),
'priority': self.module.params.get('priority'),
'ttl': self.module.params.get('ttl'),
}
self.result['diff']['before'] = {}
self.result['diff']['after'] = data
if not self.module.check_mode:
self.api_query(
path="/v1/dns/create_record",
method="POST",
data=data
)
record = self.get_record()
return record
def _update_record(self, record):
data = {
'RECORDID': record['RECORDID'],
'name': self.module.params.get('name'),
'domain': self.module.params.get('domain'),
'data': self.module.params.get('data'),
'type': self.module.params.get('record_type'),
'priority': self.module.params.get('priority'),
'ttl': self.module.params.get('ttl'),
}
has_changed = [k for k in data if k in record and data[k] != record[k]]
if has_changed:
self.result['changed'] = True
self.result['diff']['before'] = record
self.result['diff']['after'] = record.copy()
self.result['diff']['after'].update(data)
if not self.module.check_mode:
self.api_query(
path="/v1/dns/update_record",
method="POST",
data=data
)
record = self.get_record()
return record
def absent_record(self):
record = self.get_record()
if record:
self.result['changed'] = True
data = {
'RECORDID': record['RECORDID'],
'domain': self.module.params.get('domain'),
}
self.result['diff']['before'] = record
self.result['diff']['after'] = {}
if not self.module.check_mode:
self.api_query(
path="/v1/dns/delete_record",
method="POST",
data=data
)
return record
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
domain=dict(required=True),
name=dict(default="", aliases=['subrecord']),
state=dict(choices=['present', 'absent'], default='present'),
ttl=dict(type='int', default=300),
record_type=dict(choices=RECORD_TYPES, default='A', aliases=['type']),
multiple=dict(type='bool', default=False),
priority=dict(type='int', default=0),
data=dict()
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['data']),
('multiple', True, ['data']),
],
supports_check_mode=True,
)
vultr_record = AnsibleVultrDnsRecord(module)
if module.params.get('state') == "absent":
record = vultr_record.absent_record()
else:
record = vultr_record.present_record()
result = vultr_record.get_result(record)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
dhomeier/astropy | astropy/convolution/tests/test_convolve_nddata.py | 12 | 1761 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.nddata import NDData
def test_basic_nddata():
arr = np.zeros((11, 11))
arr[5, 5] = 1
ndd = NDData(arr)
test_kernel = Gaussian2DKernel(1)
result = convolve(ndd, test_kernel)
x, y = np.mgrid[:11, :11]
expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2))
np.testing.assert_allclose(result, expected, atol=1e-6)
resultf = convolve_fft(ndd, test_kernel)
np.testing.assert_allclose(resultf, expected, atol=1e-6)
@pytest.mark.parametrize('convfunc',
[lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True),
lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)])
def test_masked_nddata(convfunc):
arr = np.zeros((11, 11))
arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2
arr[5, 5] = 1.5
ndd_base = NDData(arr)
mask = arr < 0 # this is all False
mask[5, 5] = True
ndd_mask = NDData(arr, mask=mask)
arrnan = arr.copy()
arrnan[5, 5] = np.nan
ndd_nan = NDData(arrnan)
test_kernel = Gaussian2DKernel(1)
result_base = convfunc(ndd_base, test_kernel)
result_nan = convfunc(ndd_nan, test_kernel)
result_mask = convfunc(ndd_mask, test_kernel)
assert np.allclose(result_nan, result_mask)
assert not np.allclose(result_base, result_mask)
assert not np.allclose(result_base, result_nan)
# check to make sure the mask run doesn't talk back to the initial array
assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
| bsd-3-clause |
47lining/ansible | lib/ansible/runner/lookup_plugins/csvfile.py | 121 | 2645 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
import codecs
import csv
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def read_csv(self, filename, key, delimiter, dflt=None, col=1):
try:
f = codecs.open(filename, 'r', encoding='utf-8')
creader = csv.reader(f, delimiter=delimiter)
for row in creader:
if row[0] == key:
return row[int(col)]
except Exception, e:
raise errors.AnsibleError("csvfile: %s" % str(e))
return dflt
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if isinstance(terms, basestring):
terms = [ terms ]
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'file' : 'ansible.csv',
'default' : None,
'delimiter' : "TAB",
'col' : "1", # column to return
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
except (ValueError, AssertionError), e:
raise errors.AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
path = utils.path_dwim(self.basedir, paramvals['file'])
var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
if var is not None:
if type(var) is list:
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
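# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original plugin). Each term is a
# space-separated string: the lookup key followed by optional name=value
# pairs matching the `paramvals` keys above; the file name and column below
# are hypothetical.
#
#     - debug: msg="{{ lookup('csvfile', 'Smith file=people.csv delimiter=, col=2') }}"
#
# With the defaults the plugin reads ansible.csv, splits rows on TAB and
# returns column 1 of the first row whose column 0 equals the key.
# ---------------------------------------------------------------------------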
| gpl-3.0 |
froch/kubernetes-py | kubernetes_py/models/v1/LoadBalancerStatus.py | 3 | 1579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.models.v1.LoadBalancerIngress import LoadBalancerIngress
from kubernetes_py.utils import is_valid_list
class LoadBalancerStatus(object):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_loadbalancerstatus
"""
def __init__(self, model=None):
super(LoadBalancerStatus, self).__init__()
self._ingress = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "ingress" in model:
statuses = []
for i in model["ingress"]:
status = LoadBalancerIngress(i)
statuses.append(status)
self.ingress = statuses
# ------------------------------------------------------------------------------------- ingress
@property
def ingress(self):
return self._ingress
@ingress.setter
def ingress(self, ingress=None):
if not is_valid_list(ingress, LoadBalancerIngress):
raise SyntaxError("LoadBalancerStatus: ingress: [ {0} ] is invalid.".format(ingress))
self._ingress = ingress
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.ingress is not None:
data["ingress"] = [x.serialize() for x in self.ingress]
return data
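# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building the status
# from an API-style dict and serializing it back. The ingress entry below is
# an assumption about what LoadBalancerIngress accepts (a Kubernetes-style
# dict with an 'ip' field).
#
#     status = LoadBalancerStatus({'ingress': [{'ip': '10.0.0.1'}]})
#     data = status.serialize()    # rebuilds {'ingress': [...]} from each entry
# ---------------------------------------------------------------------------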
| apache-2.0 |
Johnzero/OE7 | openerp/addons-modules/base_gengo/res_company.py | 34 | 1601 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
_name = "res.company"
_inherit = "res.company"
    _columns = {
        "gengo_private_key": fields.text("Gengo Private Key"),
        "gengo_public_key": fields.text("Gengo Public Key"),
        "gengo_comment": fields.text("Comments", help="This comment will automatically be enclosed in each and every request sent to Gengo"),
"gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo."),
}
_defaults = {
"gengo_auto_approve": True,
}
| agpl-3.0 |
phase4ground/DVB-receiver | modem/python/library/demodulator.py | 1 | 2190 | import numpy as np
import iir_filter
import pi_filter
class demodulator:
'General purpose demodulator that supports BPSK, QPSK and OQPSK'
    def __init__(self, modulation_type, samples_per_symbol):
        """ Create the demodulator object """
        # store the parameters internally - important for stability analysis later
self.modulation_type = modulation_type
self.samples_per_symbol = samples_per_symbol
# create the sample counter
self.count = 0
# I and Q channel sum variables
self.I_sum = 0.0
self.Q_sum = 0.0
def update(self, input_sample, input_tick):
""" process a new sample, estimate a new demodulated bit if the correct time """
# # if the previous block wants to delay sampling it will supply an empty list
# # therefore we want to skip any operation and hold back on advancing the count
# if input_sample != []:
# # new bit transition, return demodulated bits depending on modulation type
# if self.count == 0:
# self.count += 1
# if self.modulation_type == "BPSK":
# return [np.real(input_sample)]
# elif self.modulation_type == "QPSK":
# return [np.real(input_sample), np.imag(input_sample)]
# elif self.modulation_type == "OQPSK":
# return [np.real(input_sample)]
# # offset bit, return demodulated bit for the offset bit in OQPSK
# elif self.count == self.samples_per_symbol/2:
# self.count += 1
# if self.modulation_type == "OQPSK":
# return [np.imag(input_sample)]
# # not the correct time demodulate, return nothing
# # callign function should be used with the extend function rather than append so a zero length list is added
# else:
# self.count += 1
# return []
# else:
# return []
        if input_tick[0] == 1:  # symbol tick: emit the accumulated I sample
            I_sample = self.I_sum
            self.I_sum = 0.0
            return [I_sample]
        self.I_sum += np.real(input_sample)
        return []
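# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The sample and
# tick streams below are hypothetical; an upstream timing-recovery block is
# assumed to raise the tick once per symbol.
#
#     demod = demodulator("BPSK", samples_per_symbol=8)
#     bits = []
#     for sample, tick in zip(samples, ticks):    # tick is e.g. [1] or [0]
#         bits.extend(demod.update(sample, tick))
# ---------------------------------------------------------------------------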
| gpl-3.0 |
drowningchild/lgog_old | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
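# Illustrative testfile lines (hypothetical values) in the format parsed below,
# "command: opcode: threadid: data". 'C' issues a command to the tester,
# 'T' tests a status value once, 'W' waits for it, '#' starts a comment:
#
# C: schedfifo: 0: 80
# C: locknowait: 0: 0
# T: locked: 0: 0
# W: unlocked: 1: 0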
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
psgganesh/sparkplug | packages/Sparkplug/Admin/src/node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
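# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The paths, GUID
# and configuration name below are hypothetical, and `version` stands for an
# MSVS version object providing ProjectVersion() as used by WriteIfChanged().
#
#     writer = Writer('foo.vcproj', version, 'foo',
#                     guid='{00000000-0000-0000-0000-000000000000}')
#     writer.AddConfig('Debug|Win32', attrs={'OutputDirectory': '$(SolutionDir)Debug'})
#     writer.AddFiles(['main.cc', Filter('headers', contents=['foo.h'])])
#     writer.AddFileConfig('main.cc', 'Debug|Win32', tools=[Tool('VCCLCompilerTool')])
#     writer.WriteIfChanged()
# ---------------------------------------------------------------------------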
| mit |
usirin/koding | go/src/vendor/github.com/caglar10ur/lxc/config/apparmor/lxc-generate-aa-rules.py | 34 | 3770 | #!/usr/bin/env python3
import sys
blocks = []
#
# blocks is an array of paths under which we want to block by
# default.
#
# blocks[0] = ['path' = '/sys', 'children' = [A,B] ]
# blocks[1] = ['path' = '/proc/sys', 'children' = [ E ] ]
# A = [ 'path' = 'fs', children = [C] ]
# C = [ 'path' = 'cgroup', children = [F] ]
# B = [ 'path' = 'class', children = [D] ]
# D = [ 'path' = 'net', children = [F] ]
# E = [ 'path' = 'shm*' ]
# F = [ 'path' = '**' ]
def add_block(path):
for b in blocks:
if b['path'] == path:
# duplicate
return
blocks.append({'path': path.strip(), 'children': []})
# @prev is an array of dicts which containing 'path' and
# 'children'. @path is a string. We are looking for an entry
# in @prev which contains @path, and will return its
# children array.
def child_get(prev, path):
for p in prev:
if p['path'] == path:
return p['children']
return None
def add_allow(path):
# find which block we belong to
found = None
for b in blocks:
l = len(b['path'])
if len(path) <= l:
continue
# TODO - should we find the longest match?
if path[0:l] == b['path']:
found = b
break
if found is None:
print("allow with no previous block at %s" % path)
sys.exit(1)
p = path[l:].strip()
while p[:1] == "/":
p = p[1:]
prev = b['children']
for s in p.split('/'):
n = {'path': s.strip(), 'children': []}
tmp = child_get(prev, n['path'])
if tmp is not None:
prev = tmp
else:
prev.append(n)
prev = n['children']
config = "config"
if len(sys.argv) > 1:
config = sys.argv[1]
with open(config) as f:
for x in f.readlines():
x.strip()
if x[:1] == '#':
continue
try:
(cmd, path) = x.split(' ')
except: # blank line
continue
if cmd == "block":
add_block(path)
elif cmd == "allow":
add_allow(path)
else:
print("Unknown command: %s" % cmd)
sys.exit(1)
denies = []
def collect_chars(children, ref, index):
r = ""
for c in children:
if index >= len(c['path']):
continue
if ref[0:index] != c['path'][0:index]:
continue
if c['path'][index] not in r:
r = r + c['path'][index]
return r
def append_deny(s):
s = "%s wklx," % s
if s not in denies:
denies.append(s)
def gen_denies(pathsofar, children):
for c in children:
for char in range(len(c['path'])):
if char == len(c['path'])-1 and c['path'][char] == '*':
continue
if char == len(c['path'])-2:
if c['path'][char:char+2] == '**':
continue
x = collect_chars(children, c['path'], char)
newdeny = "deny %s/%s[^%s]*{,/**}" % (pathsofar,
c['path'][0:char], x)
append_deny(newdeny)
if c['path'] != '**' and c['path'][len(c['path'])-1] != '*':
newdeny = "deny %s/%s?*{,/**}" % (pathsofar, c['path'])
append_deny(newdeny)
elif c['path'] != '**':
newdeny = "deny %s/%s/**" % (pathsofar, c['path'])
append_deny(newdeny)
if len(c['children']) != 0:
newpath = "%s/%s" % (pathsofar, c['path'])
gen_denies(newpath, c['children'])
for b in blocks:
gen_denies(b['path'], b['children'])
denies.sort()
genby = " # generated by: lxc-generate-aa-rules.py"
for a in sys.argv[1:]:
genby += " %s" % a
print(genby)
for d in denies:
print(" %s" % d)
| apache-2.0 |
jaxxstorm/fullerite | src/diamond/collectors/portstat/portstat.py | 51 | 1984 | """
The PortStatCollector collects metrics about ports listed in the config file.
##### Dependencies
* psutil
"""
from collections import defaultdict
import diamond.collector
try:
import psutil
except ImportError:
psutil = None
def get_port_stats(port):
"""
Iterate over connections and count states for specified port
:param port: port for which stats are collected
:return: Counter with port states
"""
cnts = defaultdict(int)
for c in psutil.net_connections():
c_port = c.laddr[1]
if c_port != port:
continue
status = c.status.lower()
cnts[status] += 1
return cnts
class PortStatCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(PortStatCollector, self).__init__(*args, **kwargs)
self.ports = {}
for port_name, cfg in self.config['port'].items():
port_cfg = {}
for key in ('number',):
port_cfg[key] = cfg.get(key, [])
self.ports[port_name] = port_cfg
def get_default_config_help(self):
config_help = super(PortStatCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
config = super(PortStatCollector, self).get_default_config()
config.update({
'path': 'port',
'port': {},
})
return config
def collect(self):
"""
Overrides the Collector.collect method
"""
if psutil is None:
self.log.error('Unable to import module psutil')
return {}
for port_name, port_cfg in self.ports.iteritems():
port = int(port_cfg['number'])
stats = get_port_stats(port)
for stat_name, stat_value in stats.iteritems():
metric_name = '%s.%s' % (port_name, stat_name)
self.publish(metric_name, stat_value)
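# ---------------------------------------------------------------------------
# Illustrative configuration sketch (not part of the original collector). The
# collector reads a 'port' section whose sub-sections each define a 'number';
# the section name and port below are hypothetical.
#
#     enabled = True
#     [port]
#     [[web]]
#     number = 8080
#
# With that config, collect() would publish metrics such as
# 'port.web.established' or 'port.web.listen', one per connection state
# counted by get_port_stats().
# ---------------------------------------------------------------------------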
| apache-2.0 |
jruiperezv/ANALYSE | lms/djangoapps/shoppingcart/processors/helpers.py | 169 | 1025 | """
Helper methods for credit card processing modules.
These methods should be shared among all processor implementations,
but should NOT be imported by modules outside this package.
"""
from django.conf import settings
from microsite_configuration import microsite
def get_processor_config():
"""
Return a dictionary of configuration settings for the active credit card processor.
If we're in a microsite and overrides are available, return those instead.
Returns:
dict
"""
# Retrieve the configuration settings for the active credit card processor
config = settings.CC_PROCESSOR.get(
settings.CC_PROCESSOR_NAME, {}
)
# Check whether we're in a microsite that overrides our configuration
# If so, find the microsite-specific configuration in the 'microsites'
# sub-key of the normal processor configuration.
config_key = microsite.get_value('cybersource_config_key')
if config_key:
config = config['microsites'][config_key]
return config
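# ---------------------------------------------------------------------------
# Illustrative settings sketch (not part of the original module) showing the
# shape this helper expects. The processor name and inner keys are
# hypothetical; only CC_PROCESSOR, CC_PROCESSOR_NAME and the microsite's
# 'cybersource_config_key' value are actually read by the code above.
#
#     CC_PROCESSOR_NAME = 'CyberSource2'
#     CC_PROCESSOR = {
#         'CyberSource2': {
#             'SECRET_KEY': '...',
#             'microsites': {
#                 'my_microsite': {'SECRET_KEY': '...'},
#             },
#         },
#     }
# ---------------------------------------------------------------------------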
| agpl-3.0 |
kenrachynski/powerline | powerline/renderers/vim.py | 32 | 5785 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import vim
from powerline.bindings.vim import vim_get_func, vim_getoption, environ, current_tabpage, get_vim_encoding
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from powerline.theme import Theme
from powerline.lib.unicode import unichr, register_strwidth_error
vim_mode = vim_get_func('mode', rettype='unicode')
if int(vim.eval('v:version')) >= 702:
_vim_mode = vim_mode
vim_mode = lambda: _vim_mode(1)
mode_translations = {
unichr(ord('V') - 0x40): '^V',
unichr(ord('S') - 0x40): '^S',
}
class VimRenderer(Renderer):
'''Powerline vim segment renderer.'''
character_translations = Renderer.character_translations.copy()
character_translations[ord('%')] = '%%'
segment_info = Renderer.segment_info.copy()
segment_info.update(environ=environ)
def __init__(self, *args, **kwargs):
if not hasattr(vim, 'strwidth'):
# Hope nobody want to change this at runtime
if vim.eval('&ambiwidth') == 'double':
kwargs = dict(**kwargs)
kwargs['ambigious'] = 2
super(VimRenderer, self).__init__(*args, **kwargs)
self.hl_groups = {}
self.prev_highlight = None
self.strwidth_error_name = register_strwidth_error(self.strwidth)
self.encoding = get_vim_encoding()
def shutdown(self):
self.theme.shutdown()
for match in self.local_themes.values():
if 'theme' in match:
match['theme'].shutdown()
def add_local_theme(self, matcher, theme):
if matcher in self.local_themes:
raise KeyError('There is already a local theme with given matcher')
self.local_themes[matcher] = theme
def get_matched_theme(self, match):
try:
return match['theme']
except KeyError:
match['theme'] = Theme(theme_config=match['config'], main_theme_config=self.theme_config, **self.theme_kwargs)
return match['theme']
def get_theme(self, matcher_info):
if matcher_info is None:
return self.get_matched_theme(self.local_themes[None])
for matcher in self.local_themes.keys():
if matcher and matcher(matcher_info):
return self.get_matched_theme(self.local_themes[matcher])
else:
return self.theme
if hasattr(vim, 'strwidth'):
if sys.version_info < (3,):
def strwidth(self, string):
# Does not work with tabs, but neither is strwidth from default
# renderer
return vim.strwidth(string.encode(self.encoding, 'replace'))
else:
@staticmethod
def strwidth(string):
return vim.strwidth(string)
def get_segment_info(self, segment_info, mode):
return segment_info or self.segment_info
def render(self, window=None, window_id=None, winnr=None, is_tabline=False):
'''Render all segments.'''
segment_info = self.segment_info.copy()
if window is vim.current.window:
mode = vim_mode()
mode = mode_translations.get(mode, mode)
else:
mode = 'nc'
segment_info.update(
window=window,
mode=mode,
window_id=window_id,
winnr=winnr,
buffer=window.buffer,
tabpage=current_tabpage(),
encoding=self.encoding,
)
segment_info['tabnr'] = segment_info['tabpage'].number
segment_info['bufnr'] = segment_info['buffer'].number
if is_tabline:
winwidth = int(vim_getoption('columns'))
else:
winwidth = segment_info['window'].width
statusline = super(VimRenderer, self).render(
mode=mode,
width=winwidth,
segment_info=segment_info,
matcher_info=(None if is_tabline else segment_info),
)
statusline = statusline.encode(self.encoding, self.strwidth_error_name)
return statusline
def reset_highlight(self):
self.hl_groups.clear()
def hlstyle(self, fg=None, bg=None, attrs=None):
'''Highlight a segment.
If an argument is None, the argument is ignored. If an argument is
False, the argument is reset to the terminal defaults. If an argument
is a valid color or attribute, it’s added to the vim highlight group.
'''
		# In order not to hit E541, two consecutive identical highlighting
		# specifiers may be squashed into one.
attrs = attrs or 0 # Normalize `attrs`
if (fg, bg, attrs) == self.prev_highlight:
return ''
self.prev_highlight = (fg, bg, attrs)
# We don’t need to explicitly reset attributes in vim, so skip those
# calls
if not attrs and not bg and not fg:
return ''
if not (fg, bg, attrs) in self.hl_groups:
hl_group = {
'ctermfg': 'NONE',
'guifg': None,
'ctermbg': 'NONE',
'guibg': None,
'attrs': ['NONE'],
'name': '',
}
if fg is not None and fg is not False:
hl_group['ctermfg'] = fg[0]
hl_group['guifg'] = fg[1]
if bg is not None and bg is not False:
hl_group['ctermbg'] = bg[0]
hl_group['guibg'] = bg[1]
if attrs:
hl_group['attrs'] = []
if attrs & ATTR_BOLD:
hl_group['attrs'].append('bold')
if attrs & ATTR_ITALIC:
hl_group['attrs'].append('italic')
if attrs & ATTR_UNDERLINE:
hl_group['attrs'].append('underline')
hl_group['name'] = (
'Pl_'
+ str(hl_group['ctermfg']) + '_'
+ str(hl_group['guifg']) + '_'
+ str(hl_group['ctermbg']) + '_'
+ str(hl_group['guibg']) + '_'
+ ''.join(hl_group['attrs'])
)
self.hl_groups[(fg, bg, attrs)] = hl_group
vim.command('hi {group} ctermfg={ctermfg} guifg={guifg} guibg={guibg} ctermbg={ctermbg} cterm={attrs} gui={attrs}'.format(
group=hl_group['name'],
ctermfg=hl_group['ctermfg'],
guifg='#{0:06x}'.format(hl_group['guifg']) if hl_group['guifg'] is not None else 'NONE',
ctermbg=hl_group['ctermbg'],
guibg='#{0:06x}'.format(hl_group['guibg']) if hl_group['guibg'] is not None else 'NONE',
attrs=','.join(hl_group['attrs']),
))
return '%#' + self.hl_groups[(fg, bg, attrs)]['name'] + '#'
renderer = VimRenderer
| mit |
saurabhbajaj207/CarpeDiem | venv/Lib/site-packages/Crypto/SelfTest/Cipher/test_DES3.py | 117 | 15558 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/DES3.py: Self-test for the Triple-DES cipher
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.DES3"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
from binascii import hexlify
# This is a list of (plaintext, ciphertext, key, description) tuples.
SP800_20_A1_KEY = '01' * 24
SP800_20_A2_PT = '00' * 8
test_data = [
# Test vector from Appendix B of NIST SP 800-67
# "Recommendation for the Triple Data Encryption Algorithm (TDEA) Block
# Cipher"
# http://csrc.nist.gov/publications/nistpubs/800-67/SP800-67.pdf
('54686520717566636b2062726f776e20666f78206a756d70',
'a826fd8ce53b855fcce21c8112256fe668d5c05dd9b6b900',
'0123456789abcdef23456789abcdef01456789abcdef0123',
'NIST SP800-67 B.1'),
# Test vectors "The Multi-block Message Test (MMT) for DES and TDES"
# http://csrc.nist.gov/groups/STM/cavp/documents/des/DESMMT.pdf
('326a494cd33fe756', 'b22b8d66de970692',
'627f460e08104a1043cd265d5840eaf1313edf97df2a8a8c',
'DESMMT #1', dict(mode='CBC', iv='8e29f75ea77e5475')),
('84401f78fe6c10876d8ea23094ea5309', '7b1f7c7e3b1c948ebd04a75ffba7d2f5',
'37ae5ebf46dff2dc0754b94f31cbb3855e7fd36dc870bfae',
'DESMMT #2', dict(mode='CBC', iv='3d1de3cc132e3b65')),
# Test vectors from Appendix A of NIST SP 800-20
# "Modes of Operation Validation System for the Triple Data Encryption
# Algorithm (TMOVS): Requirements and Procedures"
# http://csrc.nist.gov/publications/nistpubs/800-20/800-20.pdf
# Table A.1 - Variable Plaintext Known Answer Test
('8000000000000000', '95f8a5e5dd31d900', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #0'),
('4000000000000000', 'dd7f121ca5015619', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #1'),
('2000000000000000', '2e8653104f3834ea', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #2'),
('1000000000000000', '4bd388ff6cd81d4f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #3'),
('0800000000000000', '20b9e767b2fb1456', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #4'),
('0400000000000000', '55579380d77138ef', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #5'),
('0200000000000000', '6cc5defaaf04512f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #6'),
('0100000000000000', '0d9f279ba5d87260', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #7'),
('0080000000000000', 'd9031b0271bd5a0a', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #8'),
('0040000000000000', '424250b37c3dd951', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #9'),
('0020000000000000', 'b8061b7ecd9a21e5', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #10'),
('0010000000000000', 'f15d0f286b65bd28', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #11'),
('0008000000000000', 'add0cc8d6e5deba1', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #12'),
('0004000000000000', 'e6d5f82752ad63d1', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #13'),
('0002000000000000', 'ecbfe3bd3f591a5e', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #14'),
('0001000000000000', 'f356834379d165cd', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #15'),
('0000800000000000', '2b9f982f20037fa9', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #16'),
('0000400000000000', '889de068a16f0be6', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #17'),
('0000200000000000', 'e19e275d846a1298', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #18'),
('0000100000000000', '329a8ed523d71aec', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #19'),
('0000080000000000', 'e7fce22557d23c97', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #20'),
('0000040000000000', '12a9f5817ff2d65d', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #21'),
('0000020000000000', 'a484c3ad38dc9c19', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #22'),
('0000010000000000', 'fbe00a8a1ef8ad72', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #23'),
('0000008000000000', '750d079407521363', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #24'),
('0000004000000000', '64feed9c724c2faf', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #25'),
('0000002000000000', 'f02b263b328e2b60', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #26'),
('0000001000000000', '9d64555a9a10b852', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #27'),
('0000000800000000', 'd106ff0bed5255d7', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #28'),
('0000000400000000', 'e1652c6b138c64a5', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #29'),
('0000000200000000', 'e428581186ec8f46', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #30'),
('0000000100000000', 'aeb5f5ede22d1a36', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #31'),
('0000000080000000', 'e943d7568aec0c5c', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #32'),
('0000000040000000', 'df98c8276f54b04b', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #33'),
('0000000020000000', 'b160e4680f6c696f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #34'),
('0000000010000000', 'fa0752b07d9c4ab8', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #35'),
('0000000008000000', 'ca3a2b036dbc8502', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #36'),
('0000000004000000', '5e0905517bb59bcf', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #37'),
('0000000002000000', '814eeb3b91d90726', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #38'),
('0000000001000000', '4d49db1532919c9f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #39'),
('0000000000800000', '25eb5fc3f8cf0621', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #40'),
('0000000000400000', 'ab6a20c0620d1c6f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #41'),
('0000000000200000', '79e90dbc98f92cca', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #42'),
('0000000000100000', '866ecedd8072bb0e', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #43'),
('0000000000080000', '8b54536f2f3e64a8', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #44'),
('0000000000040000', 'ea51d3975595b86b', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #45'),
('0000000000020000', 'caffc6ac4542de31', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #46'),
('0000000000010000', '8dd45a2ddf90796c', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #47'),
('0000000000008000', '1029d55e880ec2d0', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #48'),
('0000000000004000', '5d86cb23639dbea9', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #49'),
('0000000000002000', '1d1ca853ae7c0c5f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #50'),
('0000000000001000', 'ce332329248f3228', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #51'),
('0000000000000800', '8405d1abe24fb942', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #52'),
('0000000000000400', 'e643d78090ca4207', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #53'),
('0000000000000200', '48221b9937748a23', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #54'),
('0000000000000100', 'dd7c0bbd61fafd54', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #55'),
('0000000000000080', '2fbc291a570db5c4', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #56'),
('0000000000000040', 'e07c30d7e4e26e12', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #57'),
('0000000000000020', '0953e2258e8e90a1', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #58'),
('0000000000000010', '5b711bc4ceebf2ee', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #59'),
('0000000000000008', 'cc083f1e6d9e85f6', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #60'),
('0000000000000004', 'd2fd8867d50d2dfe', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #61'),
('0000000000000002', '06e7ea22ce92708f', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #62'),
('0000000000000001', '166b40b44aba4bd6', SP800_20_A1_KEY,
'NIST SP800-20 A.1 #63'),
# Table A.2 - Variable Key Known Answer Test
(SP800_20_A2_PT, '95a8d72813daa94d', '8001010101010101'*3,
'NIST SP800-20 A.2 #0'),
(SP800_20_A2_PT, '0eec1487dd8c26d5', '4001010101010101'*3,
'NIST SP800-20 A.2 #1'),
(SP800_20_A2_PT, '7ad16ffb79c45926', '2001010101010101'*3,
'NIST SP800-20 A.2 #2'),
(SP800_20_A2_PT, 'd3746294ca6a6cf3', '1001010101010101'*3,
'NIST SP800-20 A.2 #3'),
(SP800_20_A2_PT, '809f5f873c1fd761', '0801010101010101'*3,
'NIST SP800-20 A.2 #4'),
(SP800_20_A2_PT, 'c02faffec989d1fc', '0401010101010101'*3,
'NIST SP800-20 A.2 #5'),
(SP800_20_A2_PT, '4615aa1d33e72f10', '0201010101010101'*3,
'NIST SP800-20 A.2 #6'),
(SP800_20_A2_PT, '2055123350c00858', '0180010101010101'*3,
'NIST SP800-20 A.2 #7'),
(SP800_20_A2_PT, 'df3b99d6577397c8', '0140010101010101'*3,
'NIST SP800-20 A.2 #8'),
(SP800_20_A2_PT, '31fe17369b5288c9', '0120010101010101'*3,
'NIST SP800-20 A.2 #9'),
(SP800_20_A2_PT, 'dfdd3cc64dae1642', '0110010101010101'*3,
'NIST SP800-20 A.2 #10'),
(SP800_20_A2_PT, '178c83ce2b399d94', '0108010101010101'*3,
'NIST SP800-20 A.2 #11'),
(SP800_20_A2_PT, '50f636324a9b7f80', '0104010101010101'*3,
'NIST SP800-20 A.2 #12'),
(SP800_20_A2_PT, 'a8468ee3bc18f06d', '0102010101010101'*3,
'NIST SP800-20 A.2 #13'),
(SP800_20_A2_PT, 'a2dc9e92fd3cde92', '0101800101010101'*3,
'NIST SP800-20 A.2 #14'),
(SP800_20_A2_PT, 'cac09f797d031287', '0101400101010101'*3,
'NIST SP800-20 A.2 #15'),
(SP800_20_A2_PT, '90ba680b22aeb525', '0101200101010101'*3,
'NIST SP800-20 A.2 #16'),
(SP800_20_A2_PT, 'ce7a24f350e280b6', '0101100101010101'*3,
'NIST SP800-20 A.2 #17'),
(SP800_20_A2_PT, '882bff0aa01a0b87', '0101080101010101'*3,
'NIST SP800-20 A.2 #18'),
(SP800_20_A2_PT, '25610288924511c2', '0101040101010101'*3,
'NIST SP800-20 A.2 #19'),
(SP800_20_A2_PT, 'c71516c29c75d170', '0101020101010101'*3,
'NIST SP800-20 A.2 #20'),
(SP800_20_A2_PT, '5199c29a52c9f059', '0101018001010101'*3,
'NIST SP800-20 A.2 #21'),
(SP800_20_A2_PT, 'c22f0a294a71f29f', '0101014001010101'*3,
'NIST SP800-20 A.2 #22'),
(SP800_20_A2_PT, 'ee371483714c02ea', '0101012001010101'*3,
'NIST SP800-20 A.2 #23'),
(SP800_20_A2_PT, 'a81fbd448f9e522f', '0101011001010101'*3,
'NIST SP800-20 A.2 #24'),
(SP800_20_A2_PT, '4f644c92e192dfed', '0101010801010101'*3,
'NIST SP800-20 A.2 #25'),
(SP800_20_A2_PT, '1afa9a66a6df92ae', '0101010401010101'*3,
'NIST SP800-20 A.2 #26'),
(SP800_20_A2_PT, 'b3c1cc715cb879d8', '0101010201010101'*3,
'NIST SP800-20 A.2 #27'),
(SP800_20_A2_PT, '19d032e64ab0bd8b', '0101010180010101'*3,
'NIST SP800-20 A.2 #28'),
(SP800_20_A2_PT, '3cfaa7a7dc8720dc', '0101010140010101'*3,
'NIST SP800-20 A.2 #29'),
(SP800_20_A2_PT, 'b7265f7f447ac6f3', '0101010120010101'*3,
'NIST SP800-20 A.2 #30'),
(SP800_20_A2_PT, '9db73b3c0d163f54', '0101010110010101'*3,
'NIST SP800-20 A.2 #31'),
(SP800_20_A2_PT, '8181b65babf4a975', '0101010108010101'*3,
'NIST SP800-20 A.2 #32'),
(SP800_20_A2_PT, '93c9b64042eaa240', '0101010104010101'*3,
'NIST SP800-20 A.2 #33'),
(SP800_20_A2_PT, '5570530829705592', '0101010102010101'*3,
'NIST SP800-20 A.2 #34'),
(SP800_20_A2_PT, '8638809e878787a0', '0101010101800101'*3,
'NIST SP800-20 A.2 #35'),
(SP800_20_A2_PT, '41b9a79af79ac208', '0101010101400101'*3,
'NIST SP800-20 A.2 #36'),
(SP800_20_A2_PT, '7a9be42f2009a892', '0101010101200101'*3,
'NIST SP800-20 A.2 #37'),
(SP800_20_A2_PT, '29038d56ba6d2745', '0101010101100101'*3,
'NIST SP800-20 A.2 #38'),
(SP800_20_A2_PT, '5495c6abf1e5df51', '0101010101080101'*3,
'NIST SP800-20 A.2 #39'),
(SP800_20_A2_PT, 'ae13dbd561488933', '0101010101040101'*3,
'NIST SP800-20 A.2 #40'),
(SP800_20_A2_PT, '024d1ffa8904e389', '0101010101020101'*3,
'NIST SP800-20 A.2 #41'),
(SP800_20_A2_PT, 'd1399712f99bf02e', '0101010101018001'*3,
'NIST SP800-20 A.2 #42'),
(SP800_20_A2_PT, '14c1d7c1cffec79e', '0101010101014001'*3,
'NIST SP800-20 A.2 #43'),
(SP800_20_A2_PT, '1de5279dae3bed6f', '0101010101012001'*3,
'NIST SP800-20 A.2 #44'),
(SP800_20_A2_PT, 'e941a33f85501303', '0101010101011001'*3,
'NIST SP800-20 A.2 #45'),
(SP800_20_A2_PT, 'da99dbbc9a03f379', '0101010101010801'*3,
'NIST SP800-20 A.2 #46'),
(SP800_20_A2_PT, 'b7fc92f91d8e92e9', '0101010101010401'*3,
'NIST SP800-20 A.2 #47'),
(SP800_20_A2_PT, 'ae8e5caa3ca04e85', '0101010101010201'*3,
'NIST SP800-20 A.2 #48'),
(SP800_20_A2_PT, '9cc62df43b6eed74', '0101010101010180'*3,
'NIST SP800-20 A.2 #49'),
(SP800_20_A2_PT, 'd863dbb5c59a91a0', '0101010101010140'*3,
'NIST SP800-20 A.2 #50'),
(SP800_20_A2_PT, 'a1ab2190545b91d7', '0101010101010120'*3,
'NIST SP800-20 A.2 #51'),
(SP800_20_A2_PT, '0875041e64c570f7', '0101010101010110'*3,
'NIST SP800-20 A.2 #52'),
(SP800_20_A2_PT, '5a594528bebef1cc', '0101010101010108'*3,
'NIST SP800-20 A.2 #53'),
(SP800_20_A2_PT, 'fcdb3291de21f0c0', '0101010101010104'*3,
'NIST SP800-20 A.2 #54'),
(SP800_20_A2_PT, '869efd7f9f265a09', '0101010101010102'*3,
'NIST SP800-20 A.2 #55'),
# "Two-key 3DES". Test vector generated using PyCrypto 2.0.1.
# This test is designed to test the DES3 API, not the correctness of the
# output.
('21e81b7ade88a259', '5c577d4d9b20c0f8',
'9b397ebf81b1181e282f4bb8adbadc6b', 'Two-key 3DES'),
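# Hedged illustration (not one of the NIST vectors above): a 16-byte key
# K1||K2 is treated by DES3 as the three-key schedule K1||K2||K1, so the
# ciphertext above should be reproducible with something like:
#   >>> from Crypto.Cipher import DES3
#   >>> from binascii import unhexlify, hexlify
#   >>> c = DES3.new(unhexlify('9b397ebf81b1181e282f4bb8adbadc6b'), DES3.MODE_ECB)
#   >>> hexlify(c.encrypt(unhexlify('21e81b7ade88a259')))
#   '5c577d4d9b20c0f8'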
# The following test vectors have been generated with gpg v1.4.0.
# The command line used was:
# gpg -c -z 0 --cipher-algo 3DES --passphrase secret_passphrase \
# --disable-mdc --s2k-mode 0 --output ct pt
# For an explanation, see test_AES.py .
( 'ac1762037074324fb53ba3596f73656d69746556616c6c6579', # Plaintext, 'YosemiteValley'
'9979238528357b90e2e0be549cb0b2d5999b9a4a447e5c5c7d', # Ciphertext
'7ade65b460f5ea9be35f9e14aa883a2048e3824aa616c0b2', # Key (hash of 'BearsAhead')
'GPG Test Vector #1',
dict(mode='OPENPGP', iv='cd47e2afb8b7e4b0', encrypted_iv='6a7eef0b58050e8b904a' ) ),
]
def get_tests(config={}):
from Crypto.Cipher import DES3
from common import make_block_tests
return make_block_tests(DES3, "DES3", test_data)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
MSusik/invenio | invenio/legacy/webstyle/templates.py | 3 | 29772 | ## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebStyle templates. Customize the look of pages of Invenio
"""
__revision__ = \
"$Id$"
import time
import cgi
import traceback
import urllib
import sys
import string
from bs4 import BeautifulSoup
from invenio.ext.template import render_template_to_string
from invenio.config import \
CFG_SITE_RECORD, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_BASE_URL, \
CFG_SITE_URL, \
CFG_VERSION, \
CFG_WEBSTYLE_TEMPLATE_SKIN, \
CFG_INSPIRE_SITE, \
CFG_WEBLINKBACK_TRACKBACK_ENABLED
from invenio.base.i18n import gettext_set_language, language_list_long, is_language_rtl
from invenio.utils.url import make_canonical_urlargd, create_html_link, \
get_canonical_and_alternates_urls
from invenio.utils.date import convert_datecvs_to_datestruct, \
convert_datestruct_to_dategui
from invenio.modules.formatter import format_record
from invenio.utils.html import get_mathjax_header
import invenio.legacy.template
websearch_templates = invenio.legacy.template.load('websearch')
class Template:
def tmpl_navtrailbox_body(self, ln, title, previous_links, separator,
prolog, epilog):
"""Bootstrap friendly-Create navigation trail box body
Parameters:
- 'ln' *string* - The language to display
- 'title' *string* - page title;
- 'previous_links' *string* - the trail content from site title until current page (both ends exclusive)
- 'prolog' *string* - HTML code to prefix the navtrail item with
- 'epilog' *string* - HTML code to suffix the navtrail item with
- 'separator' *string* - HTML code that separates two navtrail items
Output:
- text containing the navtrail
Note: returns an empty string for the Home page (guessed by title).
"""
# load the right message language
_ = gettext_set_language(ln)
if title == CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME):
return ""
# Breadcrumbs
# breadcrumb objects should provide properties 'text' and 'url'
# First element
breadcrumbs = [dict(text=_("Home"), url=CFG_SITE_URL), ]
# Decode previous elements
if previous_links:
soup = BeautifulSoup(previous_links)
for link in soup.find_all('a'):
breadcrumbs.append(dict(
text=unicode(' '.join(link.contents)),
url=link.get('href')))
# Add head
if title:
breadcrumbs.append(dict(text=title, url='#'))
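# Illustrative sketch (inputs assumed, not taken from Invenio itself): with
# title='Search' and previous_links='<a href="/collection/Books">Books</a>',
# the list handed to breadcrumbs.html would look like
#   [{'text': _('Home'), 'url': CFG_SITE_URL},
#    {'text': u'Books', 'url': '/collection/Books'},
#    {'text': 'Search', 'url': '#'}]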
return render_template_to_string("breadcrumbs.html",
breadcrumbs=breadcrumbs).encode('utf8')
def tmpl_page(self, req, **kwargs):
"""Creates a complete page
Parameters:
- 'ln' *string* - The language to display
- 'description' *string* - description goes to the metadata in the header of the HTML page,
not yet escaped for HTML
- 'keywords' *string* - keywords goes to the metadata in the header of the HTML page,
not yet escaped for HTML
- 'userinfobox' *string* - the HTML code for the user information box
- 'useractivities_menu' *string* - the HTML code for the user activities menu
- 'adminactivities_menu' *string* - the HTML code for the admin activities menu
- 'navtrailbox' *string* - the HTML code for the navigation trail box
- 'pageheaderadd' *string* - additional page header HTML code
- 'boxlefttop' *string* - left-top box HTML code
- 'boxlefttopadd' *string* - additional left-top box HTML code
- 'boxleftbottom' *string* - left-bottom box HTML code
- 'boxleftbottomadd' *string* - additional left-bottom box HTML code
- 'boxrighttop' *string* - right-top box HTML code
- 'boxrighttopadd' *string* - additional right-top box HTML code
- 'boxrightbottom' *string* - right-bottom box HTML code
- 'boxrightbottomadd' *string* - additional right-bottom box HTML code
- 'title' *string* - the title of the page, not yet escaped for HTML
- 'titleprologue' *string* - what to print before page title
- 'titleepilogue' *string* - what to print after page title
- 'body' *string* - the body of the page
- 'lastupdated' *string* - when the page was last updated
- 'uid' *int* - user ID
- 'pagefooteradd' *string* - additional page footer HTML code
- 'secure_page_p' *int* (0 or 1) - are we to use HTTPS friendly page elements or not?
- 'navmenuid' *string* - the id of the navigation item to highlight for this page
- 'metaheaderadd' *string* - list of further tags to add to the <HEAD></HEAD> part of the page
- 'rssurl' *string* - the url of the RSS feed for this page
- 'show_title_p' *int* (0 or 1) - do we display the page title in the body of the page?
- 'body_css_classes' *list* - list of classes to add to the body tag
- 'show_header' *boolean* - tells whether page header should be displayed or not
- 'show_footer' *boolean* - tells whether page footer should be displayed or not
Output:
- HTML code of the page
"""
ctx = dict(ln=CFG_SITE_LANG, description="",
keywords="", userinfobox="", useractivities_menu="",
adminactivities_menu="", navtrailbox="",
pageheaderadd="", boxlefttop="", boxlefttopadd="",
boxleftbottom="", boxleftbottomadd="",
boxrighttop="", boxrighttopadd="",
boxrightbottom="", boxrightbottomadd="",
titleprologue="", title="", titleepilogue="",
body="", lastupdated=None, pagefooteradd="", uid=0,
secure_page_p=0, navmenuid="", metaheaderadd="",
rssurl=CFG_SITE_URL+"/rss",
show_title_p=True, body_css_classes=None,
show_header=True, show_footer=True)
ctx.update(kwargs)
return render_template_to_string("legacy_page.html", **ctx).encode('utf8')
def tmpl_pageheader(self, req, **kwargs):
"""Creates a page header
Parameters:
- 'ln' *string* - The language to display
- 'headertitle' *string* - the title of the HTML page, not yet escaped for HTML
- 'description' *string* - description goes to the metadata in the header of the HTML page,
not yet escaped for HTML
- 'keywords' *string* - keywords goes to the metadata in the header of the HTML page,
not yet escaped for HTML
- 'userinfobox' *string* - the HTML code for the user information box
- 'useractivities_menu' *string* - the HTML code for the user activities menu
- 'adminactivities_menu' *string* - the HTML code for the admin activities menu
- 'navtrailbox' *string* - the HTML code for the navigation trail box
- 'pageheaderadd' *string* - additional page header HTML code
- 'uid' *int* - user ID
- 'secure_page_p' *int* (0 or 1) - are we to use HTTPS friendly page elements or not?
- 'navmenuid' *string* - the id of the navigation item to highlight for this page
- 'metaheaderadd' *string* - list of further tags to add to the <HEAD></HEAD> part of the page
- 'rssurl' *string* - the url of the RSS feed for this page
- 'body_css_classes' *list* - list of classes to add to the body tag
Output:
- HTML code of the page headers
"""
ctx = dict(ln=CFG_SITE_LANG, headertitle="",
description="", keywords="", userinfobox="",
useractivities_menu="", adminactivities_menu="",
navtrailbox="", pageheaderadd="", uid=0,
secure_page_p=0, navmenuid="admin", metaheaderadd="",
rssurl=CFG_SITE_URL+"/rss", body_css_classes=None)
ctx.update(kwargs)
if ctx['body_css_classes'] is None:
ctx['body_css_classes'] = [ctx.get('navmenuid', '')]
else:
ctx['body_css_classes'].append([ctx.get('navmenuid', '')])
return render_template_to_string(
"legacy_page.html",
no_pagebody=True,
no_pagefooter=True,
**ctx
).encode('utf8')
def tmpl_pagefooter(self, req, **kwargs):
"""Creates a page footer
Parameters:
- 'ln' *string* - The language to display
- 'lastupdated' *string* - when the page was last updated
- 'pagefooteradd' *string* - additional page footer HTML code
Output:
- HTML code of the page footer
"""
ctx = dict(ln=CFG_SITE_LANG, lastupdated=None, pagefooteradd=None)
ctx.update(kwargs)
lastupdated = ctx.get('lastupdated')
if lastupdated and lastupdated != '$Date$':
if lastupdated.startswith("$Date: ") or lastupdated.startswith("$Id: "):
ctx['lastupdated'] = convert_datecvs_to_datestruct(lastupdated)
return render_template_to_string(
"legacy_page.html",
no_pagebody=True,
no_pageheader=True,
**ctx
).encode('utf8')
def tmpl_language_selection_box(self, req, language=CFG_SITE_LANG):
"""Take URLARGS and LANGUAGE and return textual language
selection box for the given page.
Parameters:
- 'req' - The mod_python request object
- 'language' *string* - The selected language
"""
# load the right message language
_ = gettext_set_language(language)
# Work on a copy in order not to bork the arguments of the caller
argd = {}
if req and req.args:
argd.update(cgi.parse_qs(req.args))
parts = []
for (lang, lang_namelong) in language_list_long():
if lang == language:
parts.append('<span class="langinfo">%s</span>' % lang_namelong)
else:
# Update the 'ln' argument in the initial request
argd['ln'] = lang
if req and req.uri:
args = urllib.quote(req.uri, '/:?') + make_canonical_urlargd(argd, {})
else:
args = ""
parts.append(create_html_link(args,
{}, lang_namelong,
{'class': "langinfo"}))
if len(parts) > 1:
return _("This site is also available in the following languages:") + \
"<br />" + ' '.join(parts)
else:
## There is only one (or zero) language configured,
## so there is no need to display language alternatives.
return ""
def tmpl_error_box(self, ln, title, verbose, req, errors):
"""Produces an error box.
Parameters:
- 'title' *string* - The title of the error box
- 'ln' *string* - The selected language
- 'verbose' *bool* - If lots of information should be displayed
- 'req' *object* - the request object
- 'errors' list of tuples (error_code, error_message)
"""
# load the right message language
_ = gettext_set_language(ln)
info_not_available = _("N/A")
if title is None:
if errors:
title = _("Error") + ': %s' % errors[0][1]
else:
title = _("Internal Error")
browser_s = _("Browser")
if req:
try:
if 'User-Agent' in req.headers_in:
browser_s += ': ' + req.headers_in['User-Agent']
else:
browser_s += ': ' + info_not_available
host_s = req.hostname
page_s = req.unparsed_uri
client_s = req.remote_ip
except: # FIXME: bad except
browser_s += ': ' + info_not_available
host_s = page_s = client_s = info_not_available
else:
browser_s += ': ' + info_not_available
host_s = page_s = client_s = info_not_available
error_s = ''
sys_error_s = ''
traceback_s = ''
if verbose >= 1:
if sys.exc_info()[0]:
sys_error_s = '\n' + _("System Error") + ': %s %s\n' % \
(sys.exc_info()[0], sys.exc_info()[1])
if errors:
errs = ''
for error_tuple in errors:
try:
errs += "%s%s : %s\n " % (' '*6, error_tuple[0],
error_tuple[1])
except:
errs += "%s%s\n" % (' '*6, error_tuple)
errs = errs[6:-2] # get rid of trailing ','
error_s = _("Error") + ': %s")' % errs + "\n"
else:
error_s = _("Error") + ': ' + info_not_available
if verbose >= 9:
traceback_s = '\n' + _("Traceback") + ': \n%s' % \
string.join(traceback.format_tb(sys.exc_info()[2]),
"\n")
out = """
<table class="errorbox">
<thead>
<tr>
<th class="errorboxheader">
<p> %(title)s %(sys1)s %(sys2)s</p>
</th>
</tr>
</thead>
<tbody>
<tr>
<td class="errorboxbody">
<p>%(contact)s</p>
<blockquote><pre>
URI: http://%(host)s%(page)s
%(time_label)s: %(time)s
%(browser)s
%(client_label)s: %(client)s
%(error)s%(sys_error)s%(traceback)s
</pre></blockquote>
</td>
</tr>
<tr>
<td>
<form action="%(siteurl)s/error/send" method="post">
%(send_error_label)s
<input class="adminbutton" type="submit" value="%(send_label)s" />
<input type="hidden" name="header" value="%(title)s %(sys1)s %(sys2)s" />
<input type="hidden" name="url" value="URI: http://%(host)s%(page)s" />
<input type="hidden" name="time" value="Time: %(time)s" />
<input type="hidden" name="browser" value="%(browser)s" />
<input type="hidden" name="client" value="Client: %(client)s" />
<input type="hidden" name="error" value="%(error)s" />
<input type="hidden" name="sys_error" value="%(sys_error)s" />
<input type="hidden" name="traceback" value="%(traceback)s" />
<input type="hidden" name="referer" value="%(referer)s" />
</form>
</td>
</tr>
</tbody>
</table>
""" % {
'title' : cgi.escape(title).replace('"', '"'),
'time_label': _("Time"),
'client_label': _("Client"),
'send_error_label': \
_("Please send an error report to the administrator."),
'send_label': _("Send error report"),
'sys1' : cgi.escape(str((sys.exc_info()[0] or ''))).replace('"', '"'),
'sys2' : cgi.escape(str((sys.exc_info()[1] or ''))).replace('"', '"'),
'contact' : \
_("Please contact %(x_name)s quoting the following information:",
x_name=('<a href="mailto:' + urllib.quote(CFG_SITE_SUPPORT_EMAIL) +'">' + CFG_SITE_SUPPORT_EMAIL + '</a>')),
'host' : cgi.escape(host_s),
'page' : cgi.escape(page_s),
'time' : time.strftime("%d/%b/%Y:%H:%M:%S %z"),
'browser' : cgi.escape(browser_s).replace('"', '"'),
'client' : cgi.escape(client_s).replace('"', '"'),
'error' : cgi.escape(error_s).replace('"', '"'),
'traceback' : cgi.escape(traceback_s).replace('"', '"'),
'sys_error' : cgi.escape(sys_error_s).replace('"', '"'),
'siteurl' : CFG_BASE_URL,
'referer' : page_s!=info_not_available and \
("http://" + host_s + page_s) or \
info_not_available
}
return out
def detailed_record_container_top(self, recid, tabs, ln=CFG_SITE_LANG,
show_similar_rec_p=True,
creationdate=None,
modificationdate=None, show_short_rec_p=True,
citationnum=-1, referencenum=-1, discussionnum=-1,
include_jquery = False, include_mathjax = False):
"""Prints the box displayed in detailed records pages, with tabs at the top.
Returns content as it is if the number of tabs for this record
is smaller than 2
Parameters:
@param recid: int - the id of the displayed record
@param tabs: ** - the tabs displayed at the top of the box.
@param ln: *string* - the language of the page in which the box is displayed
@param show_similar_rec_p: *bool* print 'similar records' link in the box
@param creationdate: *string* - the creation date of the displayed record
@param modificationdate: *string* - the last modification date of the displayed record
@param show_short_rec_p: *boolean* - prints a very short version of the record as reminder.
@param citationnum: show (this) number of citations in the citations tab
@param referencenum: show (this) number of references in the references tab
@param discussionnum: show (this) number of comments/reviews in the discussion tab
"""
from invenio.legacy.search_engine import \
get_restricted_collections_for_recid, \
is_record_in_any_collection
# load the right message language
_ = gettext_set_language(ln)
# Prepare restriction flag
restriction_flag = ''
if get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False):
restriction_flag = '<div class="restrictedflag"><span>%s</span></div>' % _("Restricted")
elif not is_record_in_any_collection(recid, recreate_cache_if_needed=False):
restriction_flag = '<div class="restrictedflag restrictedflag-pending"><span>%s</span></div>' % _("Restricted (Processing Record)")
# If no tabs, returns nothing (except when restricted)
if len(tabs) <= 1:
return restriction_flag
# Build the tabs at the top of the page
out_tabs = ''
if len(tabs) > 1:
first_tab = True
for (label, url, selected, enabled) in tabs:
addnum = ""
if (citationnum > -1) and url.count("/citation") == 1:
addnum = "(" + str(citationnum) + ")"
if (referencenum > -1) and url.count("/references") == 1:
addnum = "(" + str(referencenum) + ")"
if (discussionnum > -1) and url.count("/comments") == 1:
addnum = "(" + str(discussionnum) + ")"
css_class = []
if selected:
css_class.append('on')
if first_tab:
css_class.append('first')
first_tab = False
if not enabled:
css_class.append('disabled')
css_class = ' class="%s"' % ' '.join(css_class)
if not enabled:
out_tabs += '<li%(class)s><a>%(label)s %(addnum)s</a></li>' % \
{'class':css_class,
'label':label,
'addnum':addnum}
else:
out_tabs += '<li%(class)s><a href="%(url)s">%(label)s %(addnum)s </a></li>' % \
{'class':css_class,
'url':url,
'label':label,
'addnum':addnum}
if out_tabs != '':
out_tabs = ''' <div class="detailedrecordtabs">
<div>
<ul class="detailedrecordtabs">%s</ul>
<div id="tabsSpacer" style="clear:both;height:0px"> </div></div>
</div>''' % out_tabs
# Add the clip icon and the brief record reminder if necessary
record_brief = ''
if show_short_rec_p:
record_brief = format_record(recID=recid, of='hs', ln=ln)
record_brief = '''<div id="detailedrecordshortreminder">
<div id="clip"> </div>
<div id="HB">
%(record_brief)s
</div>
</div>
<div style="clear:both;height:1px"> </div>
''' % {'record_brief': record_brief}
additional_scripts = ""
if include_jquery:
additional_scripts += """<script type="text/javascript" src="%s/js/jquery.min.js">' \
'</script>\n""" % (CFG_BASE_URL, )
if include_mathjax:
additional_scripts += get_mathjax_header()
# Print the content
out = """
%(additional_scripts)s<div class="detailedrecordbox">
%(tabs)s
<div class="detailedrecordboxcontent">
<div class="top-left-folded"></div>
<div class="top-right-folded"></div>
<div class="inside">
<!--<div style="height:0.1em;"> </div>
<p class="notopgap"> </p>-->
%(record_brief)s
""" % {'additional_scripts': additional_scripts,
'tabs':out_tabs,
'record_brief':record_brief}
out = restriction_flag + out
return out
def detailed_record_container_bottom(self, recid, tabs, ln=CFG_SITE_LANG,
show_similar_rec_p=True,
creationdate=None,
modificationdate=None, show_short_rec_p=True):
"""Prints the box displayed in detailed records pages, with tabs at the top.
Returns content as it is if the number of tabs for this record
is smaller than 2
Parameters:
- recid *int* - the id of the displayed record
- tabs ** - the tabs displayed at the top of the box.
- ln *string* - the language of the page in which the box is displayed
- show_similar_rec_p *bool* print 'similar records' link in the box
- creationdate *string* - the creation date of the displayed record
- modificationdate *string* - the last modification date of the displayed record
- show_short_rec_p *boolean* - prints a very short version of the record as reminder.
"""
# If no tabs, returns nothing
if len(tabs) <= 1:
return ''
# load the right message language
_ = gettext_set_language(ln)
similar = ""
if show_similar_rec_p and not CFG_INSPIRE_SITE:
similar = create_html_link(
websearch_templates.build_search_url(p='recid:%d' % \
recid,
rm='wrd',
ln=ln),
{}, _("Similar records"),{'class': "moreinfo"})
out = """
<div class="bottom-left-folded">%(dates)s</div>
<div class="bottom-right-folded" style="text-align:right;padding-bottom:2px;">
<span class="moreinfo" style="margin-right:10px;">%(similar)s</span></div>
</div>
</div>
</div>
<br/>
""" % {'similar' : similar,
'dates' : creationdate and '<div class="recordlastmodifiedbox" style="position:relative;margin-left:1px"> %(dates)s</div>' % {
'dates': _("Record created %(x_date_creation)s, last modified %(x_date_modification)s",
x_date_creation=creationdate,
x_date_modification=modificationdate),
} or ''
}
return out
def detailed_record_mini_panel(self, recid, ln=CFG_SITE_LANG,
format='hd',
files='',
reviews='',
actions=''):
"""Displays the actions dock at the bottom of the detailed record
pages.
Parameters:
- recid *int* - the id of the displayed record
- ln *string* - interface language code
- format *string* - the format used to display the record
- files *string* - the small panel representing the attached files
- reviews *string* - the small panel representing the reviews
- actions *string* - the small panel representing the possible user's action
"""
# load the right message language
_ = gettext_set_language(ln)
out = """
<br />
<div class="detailedrecordminipanel">
<div class="top-left"></div><div class="top-right"></div>
<div class="inside">
<div id="detailedrecordminipanelfile" style="width:33%%;float:left;text-align:center;margin-top:0">
%(files)s
</div>
<div id="detailedrecordminipanelreview" style="width:30%%;float:left;text-align:center">
%(reviews)s
</div>
<div id="detailedrecordminipanelactions" style="width:36%%;float:right;text-align:right;">
%(actions)s
</div>
<div style="clear:both;margin-bottom: 0;"></div>
</div>
<div class="bottom-left"></div><div class="bottom-right"></div>
</div>
""" % {
'siteurl': CFG_BASE_URL,
'ln':ln,
'recid':recid,
'files': files,
'reviews':reviews,
'actions': actions,
}
return out
def tmpl_error_page(self, ln=CFG_SITE_LANG, status="", admin_was_alerted=True):
"""
Display an error page.
- status *string* - the HTTP status.
"""
_ = gettext_set_language(ln)
out = """
<p>%(message)s</p>
<p>%(alerted)s</p>
<p>%(doubts)s</p>""" % {
'status' : status,
'message' : _("The server encountered an error while dealing with your request."),
'alerted' : admin_was_alerted and _("The system administrators have been alerted.") or '',
'doubts' : _("In case of doubt, please contact %(x_admin_email)s.",
x_admin_email='<a href="mailto:%(admin)s">%(admin)s</a>' % {'admin' : CFG_SITE_SUPPORT_EMAIL})
}
return out
def tmpl_warning_message(self, ln, msg):
"""
Produces a warning message for the specified text
Parameters:
- 'ln' *string* - The language to display the interface in
- 'msg' *string* - The message to display
"""
# load the right message language
_ = gettext_set_language(ln)
return """<center><font color="red">%s</font></center>""" % msg
def tmpl_write_warning(self, msg, type='', prologue='', epilogue=''):
"""
Returns formatted warning message.
Parameters:
- 'msg' *string* - The message string
- 'type' *string* - the warning type
- 'prologue' *string* - HTML code to display before the warning
- 'epilogue' *string* - HTML code to display after the warning
"""
out = '\n%s<span class="quicknote">' % (prologue)
if type:
out += '%s: ' % type
out += '%s</span>%s' % (msg, epilogue)
return out
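# Hedged usage sketch (assumes the Invenio config imports above resolve):
#   >>> Template().tmpl_write_warning('Record not found', type='Warning')
#   '\n<span class="quicknote">Warning: Record not found</span>'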
| gpl-2.0 |
davgibbs/django | django/forms/widgets.py | 184 | 37166 | """
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils import datetime_safe, formats, six
from django.utils.datastructures import MultiValueDict
from django.utils.dates import MONTHS
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[[
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
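# Hedged usage sketch (CalendarWidget and STATIC_URL='/static/' are assumptions,
# not defined in this module): a widget declares an inner Media class and the
# metaclass below exposes the combined assets as a ``media`` property.
#   >>> class CalendarWidget(TextInput):
#   ...     class Media:
#   ...         css = {'all': ('pretty.css',)}
#   ...         js = ('animations.js', 'actions.js')
#   >>> print(CalendarWidget().media)
#   <link href="/static/pretty.css" type="text/css" media="all" rel="stylesheet" />
#   <script type="text/javascript" src="/static/animations.js"></script>
#   <script type="text/javascript" src="/static/actions.js"></script>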
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = (super(MediaDefiningClass, mcs)
.__new__(mcs, name, bases, attrs))
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
needs_multipart_form = False # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value:
value = None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id')
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = (
'%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
# hasattr() masks exceptions on Python 2.
if six.PY2:
try:
getattr(value, 'url')
except AttributeError:
return False
else:
return bool(value)
return bool(value and hasattr(value, 'url'))
def get_template_substitution_values(self, value):
"""
Return value-related substitutions.
"""
return {
'initial': conditional_escape(value),
'initial_url': conditional_escape(value.url),
}
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if self.is_initial(value):
template = self.template_with_initial
substitutions.update(self.get_template_substitution_values(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{}>\r\n{}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super(DateTimeBaseInput, self).__init__(attrs)
self.format = format if format else None
def _format_value(self, value):
return formats.localize_input(value,
self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
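# Hedged examples of the rules above (the field name 'agree' is assumed):
#   >>> CheckboxInput().value_from_datadict({}, {}, 'agree')
#   False
#   >>> CheckboxInput().value_from_datadict({'agree': 'false'}, {}, 'agree')
#   False
#   >>> CheckboxInput().value_from_datadict({'agree': 'on'}, {}, 'agree')
#   True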
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{}"{}>{}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return format_html(
'<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id')
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = self.__class__(
name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label,
)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html(self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html(self.inner_html,
choice_value=force_text(w), sub_widgets=''))
return format_html(self.outer_html,
id_attr=format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
# The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
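# Hedged example (the id value is assumed): a RadioSelect rendered with
# id="id_color" labels its first subwidget, so
#   >>> RadioSelect().id_for_label('id_color')
#   'id_color_0'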
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different than other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
select_widget = Select
date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, six.string_types):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(force_str(value), input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
if year_val is None:
match = self.date_re.match(value)
if match:
year_val, month_val, day_val = [int(val) for val in match.groups()]
html = {}
choices = [(i, i) for i in self.years]
html['year'] = self.create_select(name, self.year_field, value, year_val, choices, self.year_none_value)
choices = list(self.months.items())
html['month'] = self.create_select(name, self.month_field, value, month_val, choices, self.month_none_value)
choices = [(i, i) for i in range(1, 32)]
html['day'] = self.create_select(name, self.day_field, value, day_val, choices, self.day_none_value)
output = []
for field in self._parse_date_fmt():
output.append(html[field])
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def create_select(self, name, field, value, val, choices, none_value):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not self.is_required:
choices.insert(0, none_value)
local_attrs = self.build_attrs(id=field % id_)
s = self.select_widget(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
| bsd-3-clause |
project-magpie/enigma2-openpli | lib/python/Components/PluginList.py | 6 | 1926 | from MenuList import MenuList
from Tools.Directories import resolveFilename, SCOPE_SKIN_IMAGE
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from enigma import eListboxPythonMultiContent, gFont
from Tools.LoadPixmap import LoadPixmap
def PluginEntryComponent(plugin, width=440):
if plugin.icon is None:
png = LoadPixmap(resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/icons/plugin.png"))
else:
png = plugin.icon
return [
plugin,
MultiContentEntryText(pos=(120, 5), size=(width-120, 25), font=0, text=plugin.name),
MultiContentEntryText(pos=(120, 26), size=(width-120, 17), font=1, text=plugin.description),
MultiContentEntryPixmapAlphaTest(pos=(10, 5), size=(100, 40), png = png)
]
def PluginCategoryComponent(name, png, width=440):
return [
name,
MultiContentEntryText(pos=(80, 5), size=(width-80, 25), font=0, text=name),
MultiContentEntryPixmapAlphaTest(pos=(10, 0), size=(60, 50), png = png)
]
def PluginDownloadComponent(plugin, name, version=None, width=440):
if plugin.icon is None:
png = LoadPixmap(resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/icons/plugin.png"))
else:
png = plugin.icon
if version:
if "+git" in version:
# remove git "hash"
version = "+".join(version.split("+")[:2])
elif version.startswith('experimental-'):
version = version[13:]
name += " (" + version + ")"
return [
plugin,
MultiContentEntryText(pos=(80, 5), size=(width-80, 25), font=0, text=name),
MultiContentEntryText(pos=(80, 26), size=(width-80, 17), font=1, text=plugin.description),
MultiContentEntryPixmapAlphaTest(pos=(10, 0), size=(60, 50), png = png)
]
class PluginList(MenuList):
def __init__(self, list, enableWrapAround=True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 14))
self.l.setItemHeight(50)
| gpl-2.0 |
paolodedios/shift-detect | build.py | 1 | 6581 | # -*- c-file-style: "sourcery" -*-
#
# Use and distribution of this software and its source code is governed
# by the terms and conditions defined in the "LICENSE" file that is part
# of this source code package.
#
from pybuilder.core import use_bldsup
from pybuilder.core import use_plugin
from pybuilder.core import init
from pybuilder.core import task
from pybuilder.core import Author
from pybuilder.utils import assert_can_execute
import glob
import os
import shutil
use_plugin("python.core")
use_plugin("python.flake8")
use_plugin("python.unittest")
use_plugin("python.integrationtest")
use_plugin("python.install_dependencies")
# Import local build support plugins
use_bldsup(build_support_dir="support/build")
use_plugin("copy_files")
use_plugin("clean_project")
use_plugin("distribute")
use_plugin("devpi")
use_plugin("exec")
# Declare default build phase tasks to execute
default_task = [ "clean_project", "analyze", "install_dependencies", "publish" ]
# Declare top level project properties
authors = [Author("Paolo de Dios", "[email protected]")]
name = "shift-detect"
url = "http://paolodedios.com"
summary = "Covariate shift detector."
version = "0.1.0"
license = "MPL"
@init
def set_properties(project) :
# Generate build and runtime dependency specs
project.set_property("dir_deps_requirements", "support/deps")
project.set_property("analyze_command", "support/deps/pip-compile-deps.sh {}".format(project.get_property("dir_deps_requirements")))
project.set_property("analyze_propagate_stdout", True)
project.set_property("analyze_propagate_stderr", True)
# Declare project build dependencies
project.build_depends_on_requirements("{}/requirements-build.txt".format(project.get_property("dir_deps_requirements")))
# Declare project runtime dependencies
project.depends_on_requirements("{}/requirements.txt".format(project.get_property("dir_deps_requirements")))
# Declare the location of all unit tests
project.set_property("dir_source_unittest_python", "src/test/unit/python")
project.set_property("unittest_module_glob", "*_tests")
project.set_property("unittest_test_method_prefix", "test")
# Declare the location of all integration tests
project.set_property("dir_source_integrationtest_python", "src/test/integration/python")
project.set_property("integrationtest_module_glob", "*_tests")
project.set_property("integrationtest_test_method_prefix", "test")
# Disable Teamcity output during normal builds. When the TEAMCITY_VERSION
# environment variable is set (by either Teamcity or a user), teamcity
# output will be generated automatically
project.set_property("teamcity_output", False)
# Specify unit and integration test artifacts that can be removed with the
# "clean_project" task
project.get_property("clean_project_files_glob").extend([
"{}/requirements-build.txt".format(project.get_property("dir_deps_requirements")),
"{}/requirements.txt".format(project.get_property("dir_deps_requirements")),
"{}/__pycache__".format(project.get_property("dir_source_unittest_python")),
"{}/*.pyc".format(project.get_property("dir_source_unittest_python")),
"{}/__pycache__".format(project.get_property("dir_source_integrationtest_python")),
"{}/*.pyc".format(project.get_property("dir_source_integrationtest_python"))
])
# Check sources during the analyze phase, but ignore certain PEP8 error codes.
# @see http://pep8.readthedocs.org/en/latest/intro.html#error-codes
project.set_property("flake8_ignore", "E201,E202,E203,E221,E272,E302,E303,E501")
project.set_property("flake8_verbose_output", True)
project.set_property("flake8_include_test_sources", True)
project.set_property("flake8_break_build", False)
# Copy files to the top level of the distribution staging directory
project.set_property("copy_root_files_target", "$dir_dist")
project.get_property("copy_root_files_glob").extend([
"LICENSE",
"README.rst",
"support/deps/requirements.txt",
"support/dist/setup.cfg",
"support/dist/tox.ini"
])
# Declare which copied resources will be packaged for installation via
# MAINIFEST.in
project.install_file(".", "LICENSE")
project.install_file(".", "README.rst")
project.install_file(".", "requirements.txt")
project.install_file(".", "tox.ini")
# Package all scripts in the bin directory
project.set_property("dir_dist_scripts", "bin")
    # Add PyPI package metadata classifiers.
#
# Note: Invoking "setup.py release" will typically release all code to the
# wild. In order to ensure that this doesn't accidentally happen during the
# publish phase of the build, the "Private" classifier property is specified
# by default. As a result the public PyPI service will reject this package
# but a private PyPi or DevPI server will accept it.
#
# For a complete classifier list, @see http://pypi.python.org/pypi?%3Aaction=list_classifiers
project.set_property("distutils_classifiers", [
"Private :: Do Not Upload",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: Other/Proprietary License"
"Operating System :: Unix",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Utilities"
])
# Force setup.py to generate and install a shell script for the entry point
project.set_property("distutils_console_scripts", [
"shift_detect = shift_detect.__main__:main"
])
# Extend the list of setup.py commands to be executed from sdist, bdist_dumb
project.get_property("distutils_commands").extend([ "bdist_egg", "bdist_wheel" ])
# Set user name and destination index for local devpi/PyPi central
# repository
project.set_property("devpi_user", "root")
project.set_property("devpi_developer_index", "dev")
project.set_property("devpi_staging_index" , "staging")
project.set_property("devpi_release_index" , "release")
| mpl-2.0 |
zafar-hussain/or-tools | examples/python/traffic_lights.py | 32 | 3902 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Traffic lights problem in Google CP Solver.
CSPLib problem 16
http://www.cs.st-andrews.ac.uk/~ianm/CSPLib/prob/prob016/index.html
'''
Specification:
Consider a four way traffic junction with eight traffic lights. Four of the
traffic
lights are for the vehicles and can be represented by the variables V1 to V4
with domains
{r,ry,g,y} (for red, red-yellow, green and yellow). The other four traffic
lights are
for the pedestrians and can be represented by the variables P1 to P4 with
domains {r,g}.
The constraints on these variables can be modelled by quaternary constraints
on
(Vi, Pi, Vj, Pj ) for 1<=i<=4, j=(1+i)mod 4 which allow just the tuples
{(r,r,g,g), (ry,r,y,r), (g,g,r,r), (y,r,ry,r)}.
It would be interesting to consider other types of junction (e.g. five roads
intersecting) as well as modelling the evolution over time of the traffic
light sequence.
...
Results
Only 2^2 out of the 2^12 possible assignments are solutions.
(V1,P1,V2,P2,V3,P3,V4,P4) =
{(r,r,g,g,r,r,g,g), (ry,r,y,r,ry,r,y,r), (g,g,r,r,g,g,r,r),
(y,r,ry,r,y,r,ry,r)}
[(1,1,3,3,1,1,3,3), ( 2,1,4,1, 2,1,4,1), (3,3,1,1,3,3,1,1), (4,1, 2,1,4,1,
2,1)}
The problem has relative few constraints, but each is very tight. Local
propagation
appears to be rather ineffective on this problem.
'''
Note: In this model we use only the constraint solver.AllowedAssignments().
Compare with these models:
* MiniZinc: http://www.hakank.org/minizinc/traffic_lights.mzn
* Comet : http://www.hakank.org/comet/traffic_lights.co
* ECLiPSe : http://www.hakank.org/eclipse/traffic_lights.ecl
* Gecode : http://hakank.org/gecode/traffic_lights.cpp
* SICStus : http://hakank.org/sicstus/traffic_lights.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import string
import sys
from ortools.constraint_solver import pywrapcp
def main(base=10, start=1, len1=1, len2=4):
# Create the solver.
solver = pywrapcp.Solver("Traffic lights")
#
# data
#
n = 4
r, ry, g, y = range(n)
lights = ["r", "ry", "g", "y"]
# The allowed combinations
allowed = []
allowed.extend([(r, r, g, g),
(ry, r, y, r),
(g, g, r, r),
(y, r, ry, r)])
#
# declare variables
#
V = [solver.IntVar(0, n - 1, "V[%i]" % i) for i in range(n)]
P = [solver.IntVar(0, n - 1, "P[%i]" % i) for i in range(n)]
#
# constraints
#
for i in range(n):
for j in range(n):
if j == (1 + i) % n:
solver.Add(solver.AllowedAssignments((V[i], P[i], V[j], P[j]), allowed))
#
# Search and result
#
db = solver.Phase(V + P,
solver.INT_VAR_SIMPLE,
solver.INT_VALUE_DEFAULT)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
for i in range(n):
print "%+2s %+2s" % (lights[V[i].Value()], lights[P[i].Value()]),
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
print
if __name__ == "__main__":
main()
| apache-2.0 |
isaac-playground/git-python | run_git_python.py | 1 | 1941 | """
"""
import os.path
import git
REPO_ROOT = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(REPO_ROOT, 'data')
CURRENT_EXECUTION_VERSION = 16
NEW_AND_MODIFIED = '.'
REMOVED = '-A'
COMMIT_MSG='-m "Automated commit {index}. Running through."'.format(index=CURRENT_EXECUTION_VERSION)
VERSION_TAG = 'v1.0.{build}'.format(build=CURRENT_EXECUTION_VERSION)
print("Repo root: " + REPO_ROOT)
print("Data directory: " + DATA_DIR)
repo = git.Repo(REPO_ROOT)
git_driver = repo.git
# Making some changes that we can commit.
new_file = os.path.join(DATA_DIR, "created {number}.txt".format(number=CURRENT_EXECUTION_VERSION))
old_file = os.path.join(DATA_DIR, "created {number}.txt".format(number=CURRENT_EXECUTION_VERSION-1))
modifiable_file = os.path.join(DATA_DIR, "modifiable.txt")
with open(new_file, mode='w') as fout:
contents = "Created file {number}".format(number=CURRENT_EXECUTION_VERSION)
fout.write(contents)
with open(modifiable_file, mode='a') as fout:
contents = "Modified {number} times.\n".format(number=CURRENT_EXECUTION_VERSION)
fout.write(contents)
if os.path.exists(old_file):
print("Removing file: " + old_file)
os.remove(old_file)
print("Repo is dirty: " + repr(repo.is_dirty()))
# Adding new and modified, and deleting removed files from the repo.
print('Adding new and modified....')
git_driver.add(NEW_AND_MODIFIED)
print('Removing deleted from tree....')
git_driver.add(REMOVED)
print(git_driver.status())
print('Committing changes....')
print(git_driver.commit(COMMIT_MSG))
# Tag this version if the tag doesn't exist, then push it without overwriting existing tags.
if VERSION_TAG not in repo.tags:
print('Tagging repository with: {tag}....'.format(tag=VERSION_TAG))
repo.create_tag(VERSION_TAG, message='Annotated tag {version}'.format(version=VERSION_TAG))
print('Pushing changes....')
git_driver.push('--follow-tags')
| mit |
ahamilton/vigil | eris/webserver.py | 1 | 2593 | #!/usr/bin/env python3.8
# Copyright (C) 2018-2019 Andrew Hamilton. All rights reserved.
# Licensed under the Artistic License 2.0.
import gzip
import http.server
import os
import sys
import pickle
import eris.fill3 as fill3
import eris.tools as tools
USAGE = """Usage:
eris-webserver <directory>
Example:
eris-webserver my_project
"""
def make_page(body_html, title):
return (f"<html><head><title>{title}</title></head><body><style>body "
f"{{ background-color: black; }} </style>{body_html}</body></html>"
).encode("utf-8")
class Webserver(http.server.BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
self._set_headers()
if self.path == "/":
page = summary_page
elif "/" in self.path[1:]:
path, tool = os.path.split(self.path[1:])
result = index[(path, tool)]
body = fill3.appearance_as_html(
fill3.Border(result).appearance_min())
page = make_page(body, f"{tool} of {path}")
else:
return
self.wfile.write(page)
def do_HEAD(self):
self._set_headers()
def do_POST(self):
self._set_headers()
self.wfile.write("posted".encode("utf-8"))
def make_summary_page(project_name, summary):
summary_html, summary_styles = summary.as_html()
body_html = ("\n".join(style.as_html() for style in summary_styles)
+ "\n" + summary_html)
return make_page(body_html, "Summary of " + project_name)
def run(server_class=http.server.HTTPServer, handler_class=Webserver, port=80):
server_address = ("", port)
httpd = server_class(server_address, handler_class)
print("Starting httpd…")
httpd.serve_forever()
def main():
global summary_page, index
if len(sys.argv) == 1:
print(USAGE)
sys.exit(1)
project_path = os.path.abspath(sys.argv[1])
os.chdir(project_path)
project_name = os.path.basename(project_path)
pickle_path = os.path.join(project_path, tools.CACHE_PATH,
"summary.pickle")
with gzip.open(pickle_path, "rb") as file_:
screen = pickle.load(file_)
summary_page = make_summary_page(project_name, screen._summary)
index = {}
for row in screen._summary._entries:
for result in row:
index[(result.path[2:], result.tool.__name__)] = result.result
run()
if __name__ == "__main__":
main()
| artistic-2.0 |
yd0str/infernal-twin | build/pillow/PIL/FliImagePlugin.py | 26 | 4782 | #
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
# 95-09-01 fl Created
# 97-01-03 fl Fixed parser, setup decoder tile
# 98-07-15 fl Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
from PIL import Image, ImageFile, ImagePalette, _binary
i8 = _binary.i8
i16 = _binary.i16le
i32 = _binary.i32le
o8 = _binary.o8
#
# decoder
def _accept(prefix):
return len(prefix) >= 6 and i16(prefix[4:6]) in [0xAF11, 0xAF12]
##
# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
# method to load individual frames.
class FliImageFile(ImageFile.ImageFile):
format = "FLI"
format_description = "Autodesk FLI/FLC Animation"
def _open(self):
# HEAD
s = self.fp.read(128)
magic = i16(s[4:6])
if not (magic in [0xAF11, 0xAF12] and
i16(s[14:16]) in [0, 3] and # flags
s[20:22] == b"\x00\x00"): # reserved
raise SyntaxError("not an FLI/FLC file")
# image characteristics
self.mode = "P"
self.size = i16(s[8:10]), i16(s[10:12])
# animation speed
duration = i32(s[16:20])
if magic == 0xAF11:
duration = (duration * 1000) / 70
self.info["duration"] = duration
# look for palette
palette = [(a, a, a) for a in range(256)]
s = self.fp.read(16)
self.__offset = 128
if i16(s[4:6]) == 0xF100:
# prefix chunk; ignore it
self.__offset = self.__offset + i32(s)
s = self.fp.read(16)
if i16(s[4:6]) == 0xF1FA:
# look for palette chunk
s = self.fp.read(6)
if i16(s[4:6]) == 11:
self._palette(palette, 2)
elif i16(s[4:6]) == 4:
self._palette(palette, 0)
palette = [o8(r)+o8(g)+o8(b) for (r, g, b) in palette]
self.palette = ImagePalette.raw("RGB", b"".join(palette))
# set things up to decode first frame
self.__frame = -1
self.__fp = self.fp
self.__rewind = self.fp.tell()
self._n_frames = None
self._is_animated = None
self.seek(0)
def _palette(self, palette, shift):
# load palette
i = 0
for e in range(i16(self.fp.read(2))):
s = self.fp.read(2)
i = i + i8(s[0])
n = i8(s[1])
if n == 0:
n = 256
s = self.fp.read(n * 3)
for n in range(0, len(s), 3):
r = i8(s[n]) << shift
g = i8(s[n+1]) << shift
b = i8(s[n+2]) << shift
palette[i] = (r, g, b)
i += 1
@property
def n_frames(self):
if self._n_frames is None:
current = self.tell()
try:
while True:
self.seek(self.tell() + 1)
except EOFError:
self._n_frames = self.tell() + 1
self.seek(current)
return self._n_frames
@property
def is_animated(self):
if self._is_animated is None:
current = self.tell()
try:
self.seek(1)
self._is_animated = True
except EOFError:
self._is_animated = False
self.seek(current)
return self._is_animated
def seek(self, frame):
if frame == self.__frame:
return
if frame < self.__frame:
self._seek(0)
last_frame = self.__frame
for f in range(self.__frame + 1, frame + 1):
try:
self._seek(f)
except EOFError:
self.seek(last_frame)
raise EOFError("no more images in FLI file")
def _seek(self, frame):
if frame == 0:
self.__frame = -1
self.__fp.seek(self.__rewind)
self.__offset = 128
if frame != self.__frame + 1:
raise ValueError("cannot seek to frame %d" % frame)
self.__frame = frame
# move to next frame
self.fp = self.__fp
self.fp.seek(self.__offset)
s = self.fp.read(4)
if not s:
raise EOFError
framesize = i32(s)
self.decodermaxblock = framesize
self.tile = [("fli", (0, 0)+self.size, self.__offset, None)]
self.__offset += framesize
def tell(self):
return self.__frame
#
# registry
Image.register_open("FLI", FliImageFile, _accept)
Image.register_extension("FLI", ".fli")
Image.register_extension("FLI", ".flc")
| gpl-3.0 |
imzers/gsutil-with-php | gslib/tests/test_defacl.py | 22 | 8847 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the defacl command."""
from __future__ import absolute_import
import re
from gslib.cs_api_map import ApiSelector
import gslib.tests.testcase as case
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
PUBLIC_READ_JSON_ACL_TEXT = '"entity":"allUsers","role":"READER"'
@SkipForS3('S3 does not support default object ACLs.')
class TestDefacl(case.GsUtilIntegrationTestCase):
"""Integration tests for the defacl command."""
_defacl_ch_prefix = ['defacl', 'ch']
_defacl_get_prefix = ['defacl', 'get']
_defacl_set_prefix = ['defacl', 'set']
def _MakeScopeRegex(self, role, entity_type, email_address):
template_regex = (r'\{.*"entity":\s*"%s-%s".*"role":\s*"%s".*\}' %
(entity_type, email_address, role))
return re.compile(template_regex, flags=re.DOTALL)
def testChangeDefaultAcl(self):
"""Tests defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'group', self.GROUP_TEST_ADDRESS)
test_regex2 = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':FC', suri(bucket)])
json_text2 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text2, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
json_text3 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text3, test_regex2)
stderr = self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':WRITE',
suri(bucket)],
return_stderr=True, expected_status=1)
self.assertIn('WRITER cannot be set as a default object ACL', stderr)
def testChangeDefaultAclEmpty(self):
"""Tests adding and removing an entry from an empty default object ACL."""
bucket = self.CreateBucket()
# First, clear out the default object ACL on the bucket.
self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
json_text = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
empty_regex = r'\[\]\s*'
self.assertRegexpMatches(json_text, empty_regex)
group_regex = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
json_text2 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text2, group_regex)
if self.test_api == ApiSelector.JSON:
# TODO: Enable when JSON service respects creating a private (no entries)
# default object ACL via PATCH. For now, only supported in XML.
return
# After adding and removing a group, the default object ACL should be empty.
self.RunGsUtil(self._defacl_ch_prefix +
['-d', self.GROUP_TEST_ADDRESS, suri(bucket)])
json_text3 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text3, empty_regex)
def testChangeMultipleBuckets(self):
"""Tests defacl ch on multiple buckets."""
bucket1 = self.CreateBucket()
bucket2 = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
suri(bucket1), suri(bucket2)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
def testChangeMultipleAcls(self):
"""Tests defacl ch with multiple ACL entries."""
bucket = self.CreateBucket()
test_regex_group = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
test_regex_user = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex_group)
self.assertNotRegexpMatches(json_text, test_regex_user)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
'-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex_group)
self.assertRegexpMatches(json_text, test_regex_user)
def testEmptyDefAcl(self):
bucket = self.CreateBucket()
self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
stdout = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertEquals(stdout.rstrip(), '[]')
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
def testDeletePermissionsWithCh(self):
"""Tests removing permissions with defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-d', self.USER_TEST_ADDRESS, suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
def testTooFewArgumentsFails(self):
"""Tests calling defacl with insufficient number of arguments."""
# No arguments for get, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_get_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for set, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_set_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for ch, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_ch_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# Neither arguments nor subcommand.
stderr = self.RunGsUtil(['defacl'], return_stderr=True, expected_status=1)
self.assertIn('command requires at least', stderr)
class TestDefaclOldAlias(TestDefacl):
_defacl_ch_prefix = ['chdefacl']
_defacl_get_prefix = ['getdefacl']
_defacl_set_prefix = ['setdefacl']
| apache-2.0 |
ddelemeny/calligra | 3rdparty/google-breakpad/src/tools/gyp/test/small/gyptest-small.py | 89 | 1405 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Runs small tests.
"""
import imp
import os
import sys
import unittest
import TestGyp
test = TestGyp.TestGyp()
# Add pylib to the import path (so tests can import their dependencies).
# This is consistent with the path.append done in the top file "gyp".
sys.path.append(os.path.join(test._cwd, 'pylib'))
# Add new test suites here.
files_to_test = [
'pylib/gyp/MSVSSettings_test.py',
'pylib/gyp/easy_xml_test.py',
'pylib/gyp/generator/msvs_test.py',
'pylib/gyp/generator/ninja_test.py',
'pylib/gyp/common_test.py',
]
# Collect all the suites from the above files.
suites = []
for filename in files_to_test:
# Carve the module name out of the path.
name = os.path.splitext(os.path.split(filename)[1])[0]
# Find the complete module path.
full_filename = os.path.join(test._cwd, filename)
# Load the module.
module = imp.load_source(name, full_filename)
# Add it to the list of test suites.
suites.append(unittest.defaultTestLoader.loadTestsFromModule(module))
# Create combined suite.
all_tests = unittest.TestSuite(suites)
# Run all the tests.
result = unittest.TextTestRunner(verbosity=2).run(all_tests)
if result.failures or result.errors:
test.fail_test()
test.pass_test()
| gpl-2.0 |
claws/txcosm | examples/subscribe.py | 1 | 3214 | #!/usr/bin/env python
"""
Subscribe to a feed or a datastream that is visible to the supplied Cosm user API key
To use this script you must create a text file containing your API key
and pass it to this script using the --keyfile argument as follows:
Subscribe for updates to a particular feed:
$ simple_subscribe.py --keyfile=/path/to/apikey/file --feed=XXX
Subscribe for updates to a particular datastream within a feed:
$ simple_subscribe.py --keyfile=path/to/apikey/file --feed=XXX --datastream=YYY
txcosm must be installed or visible on the PYTHONPATH.
"""
import logging
from optparse import OptionParser
import os
import sys
from twisted.internet import reactor
from txcosm.PAWSClient import PAWSClient
parser = OptionParser("")
parser.add_option("-k", "--keyfile", dest="keyfile", default=None, help="Path to file containing your Cosm API key")
parser.add_option("-f", "--feed", dest="feed", default=None, help="The feed to subscribe to")
parser.add_option("-d", "--datastream", dest="datastream", default=None, help="The datastream within the feed to subscribe to")
(options, args) = parser.parse_args()
#
# Set up callback handlers
#
def updateHandler(dataStructure):
"""
Handle a txcosm data structure object resulting from the receipt
of a subscription update message received from Cosm.
The data structure returned will vary depending on the particular
resource subscribed to.
If a datastream is specified the returned data structure will be
a txcosm.Datastream object. If just a feed is specified then the
returned data structure will be a txcosm.Environment object.
"""
logging.info("Subscription update message received:\n%s\n" % str(dataStructure))
def do_subscribe(connected, client, resource):
""" Subscribe to the specified resource if the connection is established """
if connected:
def handleSubscribeResponse(result):
token, response_code = result
print "Subscription token is: %s" % token
print "Subscribe response status: %s" % response_code
return result
d = client.subscribe(resource, updateHandler)
d.addCallback(handleSubscribeResponse)
else:
print "Connection failed"
reactor.callLater(0.1, reactor.stop)
return
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s : %(message)s")
# confirm keyfile is suppplied and valid
if options.keyfile is None:
print parser.get_usage()
sys.exit(1)
keyfile = os.path.expanduser(options.keyfile)
if not os.path.exists(keyfile):
print "Invalid API key file path: %s" % keyfile
sys.exit(1)
fd = open(keyfile, 'r')
key = fd.read().strip()
fd.close()
if options.feed is None:
print "No feed identifier specified"
print parser.get_usage()
sys.exit(1)
if options.datastream:
resource = "/feeds/%s/datastreams/%s" % (options.feed, options.datastream)
else:
resource = "/feeds/%s" % (options.feed)
client = PAWSClient(api_key=key)
d = client.connect()
d.addCallback(do_subscribe, client, resource)
reactor.run()
| mit |
followloda/PornGuys | FlaskServer/venv/Lib/site-packages/werkzeug/exceptions.py | 176 | 18733 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
Exception.__init__(self)
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')
def get_description(self, environ=None):
"""Get the description."""
return u'<p>%s</p>' % escape(self.description)
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type((
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u'<title>%(code)s %(name)s</title>\n'
u'<h1>%(name)s</h1>\n'
u'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
})
def get_headers(self, environ=None):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked like.
:return: a :class:`Response` object or a subclass thereof.
"""
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return '%d: %s' % (self.code, self.name)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'The browser (or proxy) sent a request that this server could '
'not understand.'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
by WSGI to a large extent this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class BadHost(BadRequest):
"""Raised if the submitted host is badly formatted.
.. versionadded:: 0.11.2
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
description = 'The method is not allowed for the requested URL.'
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
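# Example (illustrative, not part of the original module): raising
# MethodNotAllowed(valid_methods=['GET', 'HEAD']) yields a 405 response whose
# "Allow: GET, HEAD" header is assembled by get_headers() above.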
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
'The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'The requested URL is no longer available on this server and there '
'is no forwarding address. If you followed a link from a foreign '
'page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'A request with this method requires a valid <code>Content-'
'Length</code> header.'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'The precondition on the request for the URL failed positive '
'evaluation.'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'The data value transmitted exceeds the capacity limit.'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'The server does not support the media type transmitted in '
'the request.'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'This server is a teapot, not a coffee machine'
)
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
'The request was well-formed but was unable to be followed '
'due to semantic errors.'
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
'This request is required to be conditional; try using "If-Match" '
'or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = (
'This user has exceeded an allotted request count. Try again later.'
)
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
description = (
'One or more header fields exceeds the maximum size.'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'The server does not support the action requested by the '
'browser.'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'The proxy server received an invalid response from an upstream '
'server.'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.'
)
class GatewayTimeout(HTTPException):
"""*504* `Gateway Timeout`
Status code you should return if a connection to an upstream server
times out.
"""
code = 504
description = (
'The connection to an upstream server timed out.'
)
class HTTPVersionNotSupported(HTTPException):
"""*505* `HTTP Version Not Supported`
The server does not support the HTTP protocol version used in the request.
"""
code = 505
description = (
'The server does not support the HTTP protocol version used in the '
'request.'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in iteritems(globals()):
try:
is_http_exception = issubclass(obj, HTTPException)
except TypeError:
is_http_exception = False
if not is_http_exception or obj.code is None:
continue
__all__.append(obj.__name__)
old_obj = default_exceptions.get(obj.code, None)
if old_obj is not None and issubclass(obj, old_obj):
continue
default_exceptions[obj.code] = obj
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
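# Usage sketch (illustrative only) for the module-level ``abort`` helper
# defined above; it accepts either a status code or a response object:
#
#     abort(404)                          # looks up NotFound in the mapping and raises it
#     abort(Response('Forbidden', 403))   # re-raised wrapped in an HTTPException proxy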
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| gpl-3.0 |
flit/cmdis | cmdis/formatter.py | 1 | 6910 | # Copyright (c) 2016-2019 Chris Reed
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utilities import bytes_to_le16
from .registers import CORE_REGISTER_NAMES
from .helpers import SRType
class Operand(object):
def format(self, formatter):
        raise NotImplementedError()
class RegisterOperand(Operand):
def __init__(self, reg, wback=False):
self._reg = reg
self._wback = wback
def format(self, formatter):
result = CORE_REGISTER_NAMES[self._reg]
if self._wback:
result += "!"
return result
class ReglistOperand(Operand):
def __init__(self, reglist):
self._reglist = reglist
def format(self, formatter):
regs = []
startRange = -1
endRange = -1
def add_reg_range(regs):
if startRange != -1:
if startRange == endRange:
regs.append(CORE_REGISTER_NAMES[startRange])
else:
startReg = CORE_REGISTER_NAMES[startRange]
endReg = CORE_REGISTER_NAMES[endRange]
regs.append("%s-%s" % (startReg, endReg))
for n, b in enumerate(self._reglist):
if b:
if startRange == -1:
startRange = n
endRange = n
else:
add_reg_range(regs)
startRange = -1
endRange = -1
add_reg_range(regs)
return '{' + ','.join(regs) + '}'
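    # Example (illustrative; assumes CORE_REGISTER_NAMES uses the usual r0-r12,
    # sp, lr, pc naming): a reglist with bits 0-3 and 5 set formats as
    # "{r0-r3,r5}", since consecutive registers are collapsed into ranges.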
class ImmediateOperand(Operand):
def __init__(self, imm, hideIfZero=False):
self._imm = imm
self._hideIfZero = hideIfZero
def format(self, formatter):
if self._imm == 0 and self._hideIfZero:
return None
if self._imm > 9:
comment = "0x%x" % (self._imm)
formatter.add_comment(comment)
return "#%d" % self._imm
class LabelOperand(Operand):
def __init__(self, offset):
self._offset = offset
def format(self, formatter):
# Add a comment with the absolute address of the label.
comment = "0x%x" % (formatter.instruction.address + 4 + self._offset)
formatter.add_comment(comment)
return ".%+d" % (self._offset + 4)
class ShiftRotateOperand(Operand):
OP_NAMES = ["None",
"LSL",
"LSR",
"ASR",
"ROR",
"RRX",]
def __init__(self, type, amount):
self._type = type
self._amount = amount
def format(self, formatter):
if self._type == SRType.SRType_None:
return None
return "%s #%d" % (self.OP_NAMES[self._type.value], self._amount)
class BarrierOperand(Operand):
def __init__(self, option):
self._option = option
def format(self, formatter):
if self._option == 0b1111:
return "sy"
else:
return "#%d" % self._option
class MemoryAccessOperand(Operand):
def __init__(self, *args, **kwargs):
self._operands = args
self._wback = kwargs.get("wback", False)
def format(self, formatter):
formattedOperands = []
for o in self._operands:
formatted = o.format(formatter)
if formatted is not None:
formattedOperands.append(formatted)
result = "[" + ", ".join(formattedOperands) + "]"
if self._wback:
result += "!"
return result
class CpsOperand(Operand):
def __init__(self, affectPri, affectFault):
self._affectPri = affectPri
self._affectFault = affectFault
def format(self, formatter):
result = ""
if self._affectPri:
result += "i"
if self._affectFault:
result += "f"
return result
class SpecialRegisterOperand(Operand):
def __init__(self, spec, mask=-1):
self._spec = spec
self._mask = mask
def format(self, formatter):
result = ""
upper = self._spec[3:8]
lower = self._spec[0:3]
if upper == '00000':
if lower == '000':
result = "APSR"
elif lower == '001':
result = "IAPSR"
elif lower == '010':
result = "EAPSR"
elif lower == '011':
result = "XPSR"
elif lower == '101':
result = "IPSR"
elif lower == '110':
result = "EPSR"
elif lower == '111':
result = "IEPSR"
if lower < 4 and self._mask != -1:
if self._mask == '10':
result += '_nzcvq'
elif self._mask == '01':
result += '_g'
elif self._mask == '11':
result += '_nzcvqg'
elif upper == '00001':
if lower == '000':
result = "MSP"
elif lower == '001':
result = "PSP"
elif upper == '00010':
if lower == '000':
result = "PRIMASK"
elif lower == '001':
result = "BASEPRI"
elif lower == '010':
result = "BASEPRI_MAX"
elif lower == '011':
result = "FAULTMASK"
elif lower == '100':
result = "CONTROL"
return result
class Formatter(object):
def __init__(self, cpu):
self.instruction = None
self.cpu = cpu
self._comments = []
def format(self, instruction):
self.instruction = instruction
self._comments = []
b = instruction.bytes
hw1 = bytes_to_le16(b, 0)
byteString = "%04x" % hw1
if len(b) == 4:
hw2 = bytes_to_le16(b, 2)
byteString += " %04x" % hw2
result = "{0:<12} {1:<8}".format(byteString, self.instruction.mnemonic)
formattedOperands = []
for o in self.instruction.operands:
formatted = o.format(self)
if formatted is not None:
formattedOperands.append(formatted)
result += ", ".join(formattedOperands)
if self._comments:
result = "{0:<36} ; {1}".format(result, " ".join(self._comments))
self.instruction = None
return result
def add_comment(self, comment):
self._comments.append(comment)
| apache-2.0 |
mdblv2/joatu-django | application/site-packages/django/contrib/admindocs/utils.py | 216 | 3801 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.parser import HeaderParser
from email.errors import HeaderParseError
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
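# Example (illustrative only): given the docstring
#
#     """Creates a new user.
#
#     Validates the payload and writes the record.
#
#     Required: name, email
#     """
#
# the title is 'Creates a new user.', the body is the middle paragraph, and the
# trailing block parses as metadata {'Required': 'name, email'} because it looks
# like an RFC 822 header.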
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
'inital_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : reverse('django-admindocs-docroot').rstrip('/')
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
| apache-2.0 |
ismtabo/huffman_algorithm | main.py | 2 | 2098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python implementation of Huffman Coding
This script tests the implementation of Huffman Coding at \'main.py\'.
"""
__author__ = 'Ismael Taboada'
__version__= '1.0'
from collections import defaultdict
import csv
import os.path
import time
from huffman import HuffmanCoding
from graphviz import Digraph
DEBUG = False
DIA_FILE = 'huffman.tree'
LOG_FILE = 'log.csv'
TEST = "this is an example for huffman encoding"
"""Test for Graphviz software
"""
try:
dot = Digraph()
except Exception:
    print "Error: Graphviz software not found.\nPlease install Graphviz software on your computer.(http://www.graphviz.org/Download.php)"
    exit(1)
"""User input
"""
txtin = raw_input("Write some symbols(blank for sample case):")
txtin = TEST if txtin=="" else txtin
txtout = txtin
"""Extract frecuency of each symbol of set
"""
symb2freq = defaultdict(int)
for ch in txtin:
symb2freq[ch] += 1
"""Implementation of Huffman Algorithm
"""
start = time.time()
huff = HuffmanCoding()
huff.encode(symb2freq)
end = time.time()
time_lapse = end - start
"""Conversion from Huffman Coding Tree to Coding table
"""
coding_table = huff.tree_to_table()
"""Outputs
"""
print "Codes table"
print "Symbol\tFrec\tCode"
for coding in coding_table:
print "\t".join(map(str,coding))
    # Replace each occurrence of the symbol in the input text with its code
txtout = txtout.replace(coding[0],coding[2])
print "Time: ",time_lapse,"ms"
print "\nText input:",txtin
print "Text output:",txtout
"""Huffman tree Graphviz visualization
"""
dot = huff.tree_to_graph()
print "\nDiagram saved at: ",DIA_FILE+'.png'
dot.render(DIA_FILE, view=DEBUG)
"""Log of input's size and execution time
"""
log_exits = os.path.isfile(LOG_FILE)
with open(LOG_FILE, 'ab') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
if not log_exits:
spamwriter.writerow(['length', 'time'])
spamwriter.writerow([len(txtin), time_lapse])
print 'Log update at: ',LOG_FILE
| gpl-2.0 |
harterj/moose | modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_hard_21.py | 12 | 1567 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(ini, res, ini_x, res_x):
lo2 = 0.5 * (res_x - ini_x)
alpha = (ini - res) / 4.0 / lo2**3
beta = -3.0 * alpha * lo2**2
data = [ini_x + i*(res_x - ini_x)/100 for i in range(100)]
data = [(x, alpha * (x - ini_x - lo2)**3 + beta * (x - ini_x - lo2) + (ini + res) / 2.0) for x in data]
    return list(zip(*data))
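# Note on expected(): it evaluates a cubic that moves smoothly from `ini` at
# ini_x to `res` at res_x, with zero slope at both endpoints. As a quick check
# (using the values passed further below), expected(10.0, 20.0, 0.0, 5E-6)
# starts at 10.0 and would reach 20.0 if the sample range included res_x.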
def moose(fn):
sinphi = np.sin(30.0 * np.pi / 180.0)
cosphi = np.cos(30.0 * np.pi / 180.0)
f = open(fn)
    data = [list(map(float, line.strip().split(","))) for line in f.readlines()[4:-1]]
f.close()
intnl = [d[2] for d in data]
coh = [(0.5 * (d[5] - d[7]) + 0.5 * (d[5] + d[7]) * sinphi) / cosphi for d in data]
return (intnl, coh)
plt.figure()
expect21 = expected(10.0, 20.0, 0.0, 5E-6)
m21 = moose("gold/small_deform_hard21.csv")
plt.plot(expect21[0], expect21[1], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(m21[0], m21[1], 'k^', label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("internal parameter")
plt.ylabel("Cohesion")
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.title("Cohesion hardening")
plt.savefig("figures/small_deform_hard_21.eps")
sys.exit(0)
| lgpl-2.1 |
zas/picard | picard/webservice/api_helpers.py | 3 | 11862 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020 Laurent Monin
# Copyright (C) 2018-2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5.QtCore import QUrl
from picard import PICARD_VERSION_STR
from picard.config import get_config
from picard.const import (
ACOUSTID_HOST,
ACOUSTID_KEY,
ACOUSTID_PORT,
CAA_HOST,
CAA_PORT,
)
from picard.webservice import (
CLIENT_STRING,
DEFAULT_RESPONSE_PARSER_TYPE,
ratecontrol,
)
ratecontrol.set_minimum_delay((ACOUSTID_HOST, ACOUSTID_PORT), 333)
ratecontrol.set_minimum_delay((CAA_HOST, CAA_PORT), 0)
def escape_lucene_query(text):
return re.sub(r'([+\-&|!(){}\[\]\^"~*?:\\/])', r'\\\1', text)
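# Example (illustrative input): characters that are special in Lucene syntax
# are backslash-escaped so they match literally, e.g.
#   escape_lucene_query('AC/DC (Live!)') returns the text AC\/DC \(Live\!\)
# (one backslash inserted before each special character).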
def _wrap_xml_metadata(data):
return ('<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">%s</metadata>'
% data)
class APIHelper(object):
def __init__(self, host, port, api_path, webservice):
self._host = host
self._port = port
self.api_path = api_path
self._webservice = webservice
@property
def webservice(self):
return self._webservice
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def get(self, path_list, handler, priority=False, important=False, mblogin=False,
cacheloadcontrol=None, refresh=False, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE):
path = self.api_path + "/".join(path_list)
return self._webservice.get(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs, parse_response_type=parse_response_type)
def post(self, path_list, data, handler, priority=False, important=False,
mblogin=True, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE,
request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.post(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, parse_response_type=parse_response_type,
request_mimetype=request_mimetype)
def put(self, path_list, data, handler, priority=True, important=False,
mblogin=True, queryargs=None, request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.put(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, request_mimetype=request_mimetype)
def delete(self, path_list, handler, priority=True, important=False,
mblogin=True, queryargs=None):
path = self.api_path + "/".join(path_list)
return self._webservice.delete(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs)
class MBAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(None, None, "/ws/2/", webservice)
@property
def host(self):
config = get_config()
return config.setting['server_host']
@property
def port(self):
config = get_config()
return config.setting['server_port']
def _get_by_id(self, entitytype, entityid, handler, inc=None, queryargs=None,
priority=False, important=False, mblogin=False, refresh=False):
path_list = [entitytype, entityid]
if queryargs is None:
queryargs = {}
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs)
def get_release_by_id(self, releaseid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('release', releaseid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def get_track_by_id(self, trackid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('recording', trackid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def lookup_discid(self, discid, handler, priority=True, important=True, refresh=False):
inc = ['artist-credits', 'labels']
return self._get_by_id('discid', discid, handler, inc, queryargs={"cdstubs": "no"},
priority=priority, important=important, refresh=refresh)
def _find(self, entitytype, handler, **kwargs):
filters = []
limit = kwargs.pop("limit")
if limit:
filters.append(("limit", limit))
is_search = kwargs.pop("search", False)
if is_search:
config = get_config()
use_advanced_search = kwargs.pop("advanced_search", config.setting["use_adv_search_syntax"])
if use_advanced_search:
query = kwargs["query"]
else:
query = escape_lucene_query(kwargs["query"]).strip().lower()
filters.append(("dismax", 'true'))
else:
query = []
for name, value in kwargs.items():
value = escape_lucene_query(value).strip().lower()
if value:
query.append('%s:(%s)' % (name, value))
query = ' '.join(query)
if query:
filters.append(("query", query))
queryargs = {}
for name, value in filters:
queryargs[name] = bytes(QUrl.toPercentEncoding(str(value))).decode()
path_list = [entitytype]
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def find_releases(self, handler, **kwargs):
return self._find('release', handler, **kwargs)
def find_tracks(self, handler, **kwargs):
return self._find('recording', handler, **kwargs)
def find_artists(self, handler, **kwargs):
return self._find('artist', handler, **kwargs)
def _browse(self, entitytype, handler, inc=None, **kwargs):
path_list = [entitytype]
queryargs = kwargs
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def browse_releases(self, handler, **kwargs):
inc = ["media", "labels"]
return self._browse("release", handler, inc, **kwargs)
def submit_ratings(self, ratings, handler):
path_list = ['rating']
params = {"client": CLIENT_STRING}
recordings = (''.join(['<recording id="%s"><user-rating>%s</user-rating></recording>' %
(i[1], j*20) for i, j in ratings.items() if i[0] == 'recording']))
data = _wrap_xml_metadata('<recording-list>%s</recording-list>' % recordings)
return self.post(path_list, data, handler, priority=True,
queryargs=params, parse_response_type="xml",
request_mimetype="application/xml; charset=utf-8")
def get_collection(self, collection_id, handler, limit=100, offset=0):
path_list = ["collection"]
queryargs = None
if collection_id is not None:
inc = ["releases", "artist-credits", "media"]
path_list.extend([collection_id, "releases"])
queryargs = {}
queryargs["inc"] = "+".join(inc)
queryargs["limit"] = limit
queryargs["offset"] = offset
return self.get(path_list, handler, priority=True, important=True,
mblogin=True, queryargs=queryargs)
def get_collection_list(self, handler):
return self.get_collection(None, handler)
@staticmethod
def _collection_request(collection_id, releases):
while releases:
ids = ";".join(releases if len(releases) <= 400 else releases[:400])
releases = releases[400:]
yield ["collection", collection_id, "releases", ids]
@staticmethod
def _get_client_queryarg():
return {"client": CLIENT_STRING}
def put_to_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.put(path_list, "", handler,
queryargs=self._get_client_queryarg())
def delete_from_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.delete(path_list, handler,
queryargs=self._get_client_queryarg())
class AcoustIdAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(ACOUSTID_HOST, ACOUSTID_PORT,
'/v2/', webservice)
@staticmethod
def _encode_acoustid_args(args, format_='json'):
filters = []
args['client'] = ACOUSTID_KEY
args['clientversion'] = PICARD_VERSION_STR
args['format'] = format_
for name, value in args.items():
value = bytes(QUrl.toPercentEncoding(value)).decode()
filters.append('%s=%s' % (name, value))
return '&'.join(filters)
def query_acoustid(self, handler, **args):
path_list = ['lookup']
body = self._encode_acoustid_args(args)
return self.post(path_list, body, handler, priority=False, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
def submit_acoustid_fingerprints(self, submissions, handler):
path_list = ['submit']
config = get_config()
args = {'user': config.setting["acoustid_apikey"]}
for i, submission in enumerate(submissions):
args['fingerprint.%d' % i] = submission.fingerprint
args['duration.%d' % i] = str(submission.duration)
args['mbid.%d' % i] = submission.recordingid
if submission.puid:
args['puid.%d' % i] = submission.puid
body = self._encode_acoustid_args(args, format_='json')
return self.post(path_list, body, handler, priority=True, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
| gpl-2.0 |
WIZARD-CXY/container-agent | tests/run_containers_test.py | 6 | 16625 | #!/usr/bin/python
"""Tests for run_containers."""
import unittest
import yaml
from container_agent import run_containers
class RunContainersTest(unittest.TestCase):
def testKnownVersion(self):
yaml_code = """
version: v1beta1
"""
run_containers.CheckVersion(yaml.load(yaml_code))
def testNoVersion(self):
yaml_code = """
not_version: not valid
"""
with self.assertRaises(SystemExit):
run_containers.CheckVersion(yaml.load(yaml_code))
def testUnknownVersion(self):
yaml_code = """
version: not valid
"""
with self.assertRaises(SystemExit):
run_containers.CheckVersion(yaml.load(yaml_code))
def testRfc1035Name(self):
self.assertFalse(run_containers.IsRfc1035Name('1'))
self.assertFalse(run_containers.IsRfc1035Name('123'))
self.assertFalse(run_containers.IsRfc1035Name('123abc'))
self.assertFalse(run_containers.IsRfc1035Name('123abc'))
self.assertFalse(run_containers.IsRfc1035Name('a_b'))
self.assertFalse(run_containers.IsRfc1035Name('a:b'))
self.assertFalse(run_containers.IsRfc1035Name('a b'))
self.assertFalse(run_containers.IsRfc1035Name('A.B'))
self.assertFalse(run_containers.IsRfc1035Name('ab-'))
self.assertTrue(run_containers.IsRfc1035Name('a'))
self.assertTrue(run_containers.IsRfc1035Name('abc'))
self.assertTrue(run_containers.IsRfc1035Name('abc123'))
self.assertTrue(run_containers.IsRfc1035Name('abc123def'))
self.assertTrue(run_containers.IsRfc1035Name('abc-123-def'))
def testVolumeValid(self):
yaml_code = """
- name: abc
- name: abc-123
- name: a
"""
x = run_containers.LoadVolumes(yaml.load(yaml_code))
self.assertEqual(3, len(x))
self.assertEqual('abc', x[0])
self.assertEqual('abc-123', x[1])
self.assertEqual('a', x[2])
def testVolumeNoName(self):
yaml_code = """
- notname: notgood
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testVolumeInvalidName(self):
yaml_code = """
- name: 123abc
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testVolumeDupName(self):
yaml_code = """
- name: abc123
- name: abc123
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testContainerValidMinimal(self):
yaml_code = """
- name: abc123
image: foo/bar
- name: abc124
image: foo/bar
"""
user = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(2, len(user))
self.assertEqual('abc123', user[0].name)
self.assertEqual('abc124', user[1].name)
infra = run_containers.LoadInfraContainers(user)
self.assertEqual(1, len(infra))
self.assertEqual('.net', infra[0].name)
def testContainerValidFull(self):
yaml_code = """
- name: abc123
image: foo/bar
command:
- one
- two
workingDir: /tmp
ports:
- name: port1
hostPort: 111
containerPort: 2222
protocol: UDP
volumeMounts:
- name: vol1
path: /mnt
readOnly: true
env:
- key: KEY
value: value str
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), ['vol1'])
self.assertEqual(1, len(x))
self.assertEqual('abc123', x[0].name)
self.assertEqual('foo/bar', x[0].image)
self.assertEqual(['one', 'two'], x[0].command)
self.assertEqual('/tmp', x[0].working_dir)
self.assertEqual((111, 2222, '/udp'), x[0].ports[0])
self.assertEqual('/export/vol1:/mnt:ro', x[0].mounts[0])
self.assertEqual('KEY=value str', x[0].env_vars[0])
def testContainerValidFullJson(self):
"""Proves that the same YAML parsing code handles JSON."""
json_code = """
[
{
"name": "abc123",
"image": "foo/bar",
"command": [
"one",
"two"
],
"workingDir": "/tmp",
"ports": [
{
"name": "port1",
"hostPort": 111,
"containerPort": 2222,
"protocol": "UDP"
}
],
"volumeMounts": [
{
"name": "vol1",
"path": "/mnt",
"readOnly": true
}
],
"env": [
{
"key": "KEY",
"value": "value str"
}
]
}
]
"""
x = run_containers.LoadUserContainers(yaml.load(json_code), ['vol1'])
self.assertEqual(1, len(x))
self.assertEqual('abc123', x[0].name)
self.assertEqual('foo/bar', x[0].image)
self.assertEqual(['one', 'two'], x[0].command)
self.assertEqual('/tmp', x[0].working_dir)
self.assertEqual((111, 2222, '/udp'), x[0].ports[0])
self.assertEqual('/export/vol1:/mnt:ro', x[0].mounts[0])
self.assertEqual('KEY=value str', x[0].env_vars[0])
def testContainerNoName(self):
yaml_code = """
- notname: notgood
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerInvalidName(self):
yaml_code = """
- name: not_good
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerDupName(self):
yaml_code = """
- name: abc123
image: foo/bar
- name: abc123
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerNoImage(self):
yaml_code = """
- name: abc123
notimage: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerWithoutCommand(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(1, len(x))
self.assertEqual(0, len(x[0].command))
def testContainerWithCommand(self):
yaml_code = """
- name: abc123
image: foo/bar
command:
- first
- second
- third fourth
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(1, len(x))
self.assertEqual(3, len(x[0].command))
def testContainerWithoutWorkingDir(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertIsNone(x[0].working_dir)
def testContainerWithWorkingDir(self):
yaml_code = """
- name: abc123
image: foo/bar
workingDir: /foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual('/foo/bar', x[0].working_dir)
def testContainerWorkingDirNotAbsolute(self):
yaml_code = """
- name: abc123
image: foo/bar
workingDir: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerWithoutPorts(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].ports))
def testPortValidMinimal(self):
yaml_code = """
- containerPort: 1
- containerPort: 65535
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual((1, 1, ''), x[0])
self.assertEqual((65535, 65535, ''), x[1])
def testPortWithName(self):
yaml_code = """
- name: abc123
containerPort: 123
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, ''), x[0])
def testPortInvalidName(self):
yaml_code = """
- name: 123abc
containerPort: 123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortDupName(self):
yaml_code = """
- name: abc123
containerPort: 123
- name: abc123
containerPort: 124
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortNoContainerPort(self):
yaml_code = """
- name: abc123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooLowContainerPort(self):
yaml_code = """
- containerPort: 0
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooHighContainerPort(self):
yaml_code = """
- containerPort: 65536
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortWithHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 456
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((456, 123, ''), x[0])
def testPortTooLowHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 0
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooHighHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 65536
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortDupHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 123
- containerPort: 124
hostPort: 123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortWithProtocolTcp(self):
yaml_code = """
- containerPort: 123
protocol: TCP
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, ''), x[0])
def testPortWithProtocolUdp(self):
yaml_code = """
- containerPort: 123
protocol: UDP
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, '/udp'), x[0])
def testPortWithInvalidProtocol(self):
yaml_code = """
- containerPort: 123
protocol: IGMP
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testContainerWithoutMounts(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].mounts))
def testMountValidMinimal(self):
yaml_code = """
- name: vol1
path: /mnt/vol1
- name: vol2
path: /mnt/vol2
"""
x = run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1', 'vol2'], 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual('/export/vol1:/mnt/vol1:rw', x[0])
self.assertEqual('/export/vol2:/mnt/vol2:rw', x[1])
def testMountNoName(self):
yaml_code = """
- path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testMountInvalidName(self):
yaml_code = """
- name: 1vol
path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['1vol'], 'ctr_name')
def testMountUnknownName(self):
yaml_code = """
- name: vol1
path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), [], 'ctr_name')
def testMountNoPath(self):
yaml_code = """
- name: vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testMountInvalidPath(self):
yaml_code = """
- name: vol1
path: mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testContainerWithoutEnv(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].env_vars))
def testEnvValidMinimal(self):
yaml_code = """
- key: key1
value: value
- key: key2
value: value too
"""
x = run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual('key1=value', x[0])
self.assertEqual('key2=value too', x[1])
def testEnvNoKey(self):
yaml_code = """
- value: value
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testEnvInvalidKey(self):
yaml_code = """
- key: 1value
value: value
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testEnvNoValue(self):
yaml_code = """
- key: key
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testFlagList(self):
self.assertEqual([], run_containers.FlagList([], '-x'))
self.assertEqual(['-x', 'a'], run_containers.FlagList(['a'], '-x'))
self.assertEqual(['-x', 'a', '-x', 'b', '-x', 'c'],
run_containers.FlagList(['a', 'b', 'c'], '-x'))
def testFlagOrNothing(self):
self.assertEqual([], run_containers.FlagOrNothing(None, '-x'))
self.assertEqual(['-x', 'a'], run_containers.FlagOrNothing('a', '-x'))
def testCheckGroupWideConflictsOk(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(81, 81, '')]
containers.append(c)
c = run_containers.Container('name2', 'ubuntu')
c.ports = [(81, 81, '/udp')]
containers.append(c)
run_containers.CheckGroupWideConflicts(containers)
def testCheckGroupWideConflictsDupHostPort(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 81, '')]
containers.append(c)
with self.assertRaises(SystemExit):
run_containers.CheckGroupWideConflicts(containers)
def testCheckGroupWideConflictsDupContainerPort(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(81, 80, '')]
containers.append(c)
with self.assertRaises(SystemExit):
run_containers.CheckGroupWideConflicts(containers)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jonathonwalz/ansible | lib/ansible/modules/network/panos/panos_lic.py | 78 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
tcheehow/MissionPlanner | Lib/sgmllib.py | 64 | 18437 | """A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
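# Minimal illustrative subclass of the pattern described above (the tag and
# attribute names are examples only):
#
#   class AnchorParser(SGMLParser):
#       def reset(self):
#           SGMLParser.reset(self)
#           self.links = []
#       def start_a(self, attrs):        # called for <a ...>
#           self.links.extend(v for k, v in attrs if k == 'href')
#       def end_a(self):                 # called for </a>
#           pass
#
#   p = AnchorParser()
#   p.feed('<a href="index.html">home</a>')
#   p.close()
#   # p.links == ['index.html']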
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
As an alternative to overriding this method; one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| gpl-3.0 |
paplorinc/intellij-community | plugins/hg4idea/testData/bin/hgext/inotify/linux/watcher.py | 92 | 10504 | # watcher.py - high-level interfaces to the Linux inotify subsystem
# Copyright 2006 Bryan O'Sullivan <[email protected]>
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License, or any later version.
'''High-level interfaces to the Linux inotify subsystem.
The inotify subsystem provides an efficient mechanism for file status
monitoring and change notification.
The watcher class hides the low-level details of the inotify
interface, and provides a Pythonic wrapper around it. It generates
events that provide somewhat more information than raw inotify makes
available.
The autowatcher class is more useful, as it automatically watches
newly-created directories on your behalf.'''
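# Minimal usage sketch (the path and mask are illustrative; IN_CREATE and
# IN_DELETE come from the low-level _inotify module imported below):
#
#   w = autowatcher()
#   w.add_all('/tmp/watched', inotify.IN_CREATE | inotify.IN_DELETE)
#   while True:
#       for evt in w.read():
#           print evt.fullpath, evt.mask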
__author__ = "Bryan O'Sullivan <[email protected]>"
import _inotify as inotify
import array
import errno
import fcntl
import os
import termios
class event(object):
'''Derived inotify event class.
The following fields are available:
mask: event mask, indicating what kind of event this is
cookie: rename cookie, if a rename-related event
path: path of the directory in which the event occurred
name: name of the directory entry to which the event occurred
(may be None if the event happened to a watched directory)
fullpath: complete path at which the event occurred
wd: watch descriptor that triggered this event'''
__slots__ = (
'cookie',
'fullpath',
'mask',
'name',
'path',
'raw',
'wd',
)
def __init__(self, raw, path):
self.path = path
self.raw = raw
if raw.name:
self.fullpath = path + '/' + raw.name
else:
self.fullpath = path
self.wd = raw.wd
self.mask = raw.mask
self.cookie = raw.cookie
self.name = raw.name
def __repr__(self):
r = repr(self.raw)
return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
_event_props = {
'access': 'File was accessed',
'modify': 'File was modified',
'attrib': 'Attribute of a directory entry was changed',
'close_write': 'File was closed after being written to',
'close_nowrite': 'File was closed without being written to',
'open': 'File was opened',
'moved_from': 'Directory entry was renamed from this name',
'moved_to': 'Directory entry was renamed to this name',
'create': 'Directory entry was created',
'delete': 'Directory entry was deleted',
'delete_self': 'The watched directory entry was deleted',
'move_self': 'The watched directory entry was renamed',
'unmount': 'Directory was unmounted, and can no longer be watched',
'q_overflow': 'Kernel dropped events due to queue overflow',
'ignored': 'Directory entry is no longer being watched',
'isdir': 'Event occurred on a directory',
}
for k, v in _event_props.iteritems():
mask = getattr(inotify, 'IN_' + k.upper())
    def getter(self, mask=mask):
        # the default argument binds this iteration's mask at definition time
        return self.mask & mask
getter.__name__ = k
getter.__doc__ = v
setattr(event, k, property(getter, doc=v))
del _event_props
class watcher(object):
'''Provide a Pythonic interface to the low-level inotify API.
Also adds derived information to each event that is not available
through the normal inotify API, such as directory name.'''
__slots__ = (
'fd',
'_paths',
'_wds',
)
def __init__(self):
'''Create a new inotify instance.'''
self.fd = inotify.init()
self._paths = {}
self._wds = {}
def fileno(self):
'''Return the file descriptor this watcher uses.
Useful for passing to select and poll.'''
return self.fd
def add(self, path, mask):
'''Add or modify a watch.
Return the watch descriptor added or modified.'''
path = os.path.normpath(path)
wd = inotify.add_watch(self.fd, path, mask)
self._paths[path] = wd, mask
self._wds[wd] = path, mask
return wd
def remove(self, wd):
'''Remove the given watch.'''
inotify.remove_watch(self.fd, wd)
self._remove(wd)
def _remove(self, wd):
path_mask = self._wds.pop(wd, None)
if path_mask is not None:
self._paths.pop(path_mask[0])
def path(self, path):
'''Return a (watch descriptor, event mask) pair for the given path.
If the path is not being watched, return None.'''
return self._paths.get(path)
def wd(self, wd):
'''Return a (path, event mask) pair for the given watch descriptor.
If the watch descriptor is not valid or not associated with
this watcher, return None.'''
return self._wds.get(wd)
def read(self, bufsize=None):
'''Read a list of queued inotify events.
If bufsize is zero, only return those events that can be read
immediately without blocking. Otherwise, block until events are
available.'''
events = []
for evt in inotify.read(self.fd, bufsize):
events.append(event(evt, self._wds[evt.wd][0]))
if evt.mask & inotify.IN_IGNORED:
self._remove(evt.wd)
elif evt.mask & inotify.IN_UNMOUNT:
self.close()
return events
def close(self):
'''Shut down this watcher.
All subsequent method calls are likely to raise exceptions.'''
os.close(self.fd)
self.fd = None
self._paths = None
self._wds = None
def __len__(self):
'''Return the number of active watches.'''
return len(self._paths)
def __iter__(self):
'''Yield a (path, watch descriptor, event mask) tuple for each
entry being watched.'''
for path, (wd, mask) in self._paths.iteritems():
yield path, wd, mask
def __del__(self):
if self.fd is not None:
os.close(self.fd)
ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
def add_iter(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Yield each added or modified watch descriptor.
To ensure that this method runs to completion, you must
iterate over all of its results, even if you do not care what
they are. For example:
for wd in w.add_iter(path, mask):
pass
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
# Add the IN_ONLYDIR flag to the event mask, to avoid a possible
# race when adding a subdirectory. In the time between the
# event being queued by the kernel and us processing it, the
# directory may have been deleted, or replaced with a different
# kind of entry with the same name.
submask = mask | inotify.IN_ONLYDIR
try:
yield self.add(path, mask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
for d in dirs:
try:
yield self.add(root + '/' + d, submask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
def add_all(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Return a list of added or modified watch descriptors.
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
return [w for w in self.add_iter(path, mask, onerror)]
class autowatcher(watcher):
'''watcher class that automatically watches newly created directories.'''
__slots__ = (
'addfilter',
)
def __init__(self, addfilter=None):
'''Create a new inotify instance.
This instance will automatically watch newly created
directories.
If the optional addfilter parameter is not None, it must be a
callable that takes one parameter. It will be called each time
a directory is about to be automatically watched. If it returns
True, the directory will be watched if it still exists,
otherwise, it will be skipped.'''
super(autowatcher, self).__init__()
self.addfilter = addfilter
_dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
def read(self, bufsize=None):
events = super(autowatcher, self).read(bufsize)
for evt in events:
if evt.mask & self._dir_create_mask == self._dir_create_mask:
if self.addfilter is None or self.addfilter(evt):
parentmask = self._wds[evt.wd][1]
# See note about race avoidance via IN_ONLYDIR above.
mask = parentmask | inotify.IN_ONLYDIR
try:
self.add_all(evt.fullpath, mask)
except OSError, err:
if err.errno not in self.ignored_errors:
raise
return events
class threshold(object):
'''Class that indicates whether a file descriptor has reached a
threshold of readable bytes available.
This class is not thread-safe.'''
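    # Illustrative sketch (the byte threshold is an arbitrary example): combine
    # this with a watcher to poll only when enough event data is queued:
    #
    #   w = watcher()
    #   enough = threshold(w.fileno(), 512)
    #   if enough():
    #       events = w.read(0)    # non-blocking read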
__slots__ = (
'fd',
'threshold',
'_iocbuf',
)
def __init__(self, fd, threshold=1024):
self.fd = fd
self.threshold = threshold
self._iocbuf = array.array('i', [0])
def readable(self):
'''Return the number of bytes readable on this file descriptor.'''
fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
return self._iocbuf[0]
def __call__(self):
'''Indicate whether the number of readable bytes has met or
exceeded the threshold.'''
return self.readable() >= self.threshold
| apache-2.0 |
ChenJunor/hue | desktop/core/src/desktop/redaction/tests.py | 30 | 14341 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import random
import re
import tempfile
import threading
from desktop.redaction.engine import RedactionEngine, \
RedactionPolicy, \
RedactionRule, \
parse_redaction_policy_from_file, \
_convert_java_pattern_to_python
from desktop.redaction.logfilter import add_log_redaction_filter_to_logger
from nose.tools import assert_true, assert_equal, assert_not_equal, raises
MESSAGE = "This string is not redacted"
def get_path(filename):
return os.path.join(os.path.dirname(__file__), 'test_data', filename)
class MockLoggingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.records = []
def emit(self, record):
self.records.append(record)
def reset(self):
del self.records[:]
class TestRedactionRule(object):
def test_redaction_rule_works(self):
rule = RedactionRule('password=', 'password=".*"', 'password="???"')
test_strings = [
('message', 'message'),
('password="a password"', 'password="???"'),
('before password="a password" after', 'before password="???" after'),
]
for message, redacted_message in test_strings:
assert_equal(rule.redact(message), redacted_message)
def test_non_redacted_string_returns_same_string(self):
rule = RedactionRule('password=', 'password=".*"', 'password="???"')
message = 'message'
assert_true(rule.redact(message) is message)
def test_equality(self):
rule1 = RedactionRule('password=', 'password=".*"', 'password="???"')
rule2 = RedactionRule('password=', 'password=".*"', 'password="???"')
    rule3 = RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX')
assert_equal(rule1, rule2)
assert_not_equal(rule1, rule3)
def test_parse_redaction_policy_from_file(self):
with tempfile.NamedTemporaryFile() as f:
json.dump({
'version': 1,
'rules': [
{
'description': 'redact passwords',
'trigger': 'password=',
'search': 'password=".*"',
'replace': 'password="???"',
},
{
'description': 'redact social security numbers',
'search': '\d{3}-\d{2}-\d{4}',
'replace': 'XXX-XX-XXXX',
},
]
}, f)
f.flush()
policy = parse_redaction_policy_from_file(f.name)
assert_equal(policy.rules, [
RedactionRule(u'password=', u'password=".*"', u'password="???"'),
RedactionRule(None, u'\d{3}-\d{2}-\d{4}', u'XXX-XX-XXXX'),
])
class TestRedactionEngine(object):
def test_redaction_works(self):
redaction_engine = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
test_strings = [
('message', 'message'),
('password="a password"', 'password="???"'),
('before password="a password" after', 'before password="???" after'),
('an ssn=123-45-6789', 'an ssn=XXX-XX-XXXX'),
]
for message, redacted_message in test_strings:
assert_equal(redaction_engine.redact(message), redacted_message)
def test_equality(self):
engine1 = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
])
engine2 = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
])
engine3 = RedactionEngine([
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
assert_equal(engine1, engine2)
assert_not_equal(engine1, engine3)
class TestRedactionLogFilter(object):
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger(cls.__name__)
cls.handler = MockLoggingHandler()
cls.logger.addHandler(cls.handler)
policy = RedactionPolicy([
RedactionRule('password=', 'password=".*"', 'password="???"'),
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
engine = RedactionEngine([policy])
add_log_redaction_filter_to_logger(engine, cls.logger)
@classmethod
def tearDownClass(cls):
cls.logger.handlers = []
def tearDown(self):
self.handler.reset()
def test_redaction_filter(self):
test_strings = [
{
'message': 'message',
'result_message': 'message',
'result_msg': 'message',
'result_args': (),
},
{
'message': 'message %s',
'args': ['an arg'],
'result_message': 'message an arg',
'result_msg': 'message %s',
'result_args': ('an arg',),
},
{
'message': 'password="a password"',
'result_message': 'password="???"',
},
{
'message': 'password="%s"',
'args': ['a password'],
'result_message': 'password="???"',
},
{
'message': 'password=%s',
'args': ['"a password"'],
'result_message': 'password="???"',
},
{
'message': 'before password="%s" after',
'args': ['a password'],
'result_message': 'before password="???" after',
},
{
'message': 'ssn=%s-%s-%s',
'args': ['123', '45', '6789'],
'result_message': 'ssn=XXX-XX-XXXX',
},
]
for test in test_strings:
self.logger.debug(test['message'], *test.get('args', ()))
for test, record in zip(test_strings, self.handler.records):
assert_equal(record.getMessage(), test['result_message'])
assert_equal(record.message, test['result_message'])
assert_equal(record.msg, test.get('result_msg', test['result_message']))
assert_equal(record.args, test.get('result_args'))
def test_convert_java_pattern_to_python(self):
assert_equal(_convert_java_pattern_to_python('1-2'), '1-2')
assert_equal(_convert_java_pattern_to_python('$1-$2'), '\\1-\\2')
assert_equal(_convert_java_pattern_to_python('\\$1-$2'), '$1-\\2')
assert_equal(_convert_java_pattern_to_python('\\$$1-$2'), '$\\1-\\2')
@raises(IOError)
def test_does_not_exist(self):
path = get_path('thisfiledoesnotexist.json')
parse_redaction_policy_from_file(path)
@raises(IOError)
def test_is_dir(self):
path = '/tmp'
parse_redaction_policy_from_file(path)
@raises(IOError)
def test_is_not_json(self):
path = get_path('not-json.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_version(self):
path = get_path('no-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_unknown_version(self):
path = get_path('unknown-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_alpha_version(self):
path = get_path('alpha-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_search(self):
path = get_path('no-search.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_replace(self):
path = get_path('no-replace.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_brace(self):
path = get_path('no-brace.json')
parse_redaction_policy_from_file(path)
@raises(re.error)
def test_bad_regex(self):
path = get_path('bad-regex.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_extra_attr(self):
path = get_path('extra-attr.json')
parse_redaction_policy_from_file(path)
def test_empty_file(self):
path = get_path('empty.json')
policy = parse_redaction_policy_from_file(path)
assert_equal(MESSAGE, policy.redact(MESSAGE))
def test_empty_rules(self):
path = get_path('empty-rules.json')
policy = parse_redaction_policy_from_file(path)
assert_equal(MESSAGE, policy.redact(MESSAGE))
def test_basic_good1(self):
path = get_path('good-1.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("Hxllx, wxrld", policy.redact("Hello, world"))
def test_int_version(self):
path = get_path('verint.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("Hxllx, wxrld", policy.redact("Hello, world"))
def test_real_rules(self):
path = get_path('real-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("CC 1234-2345-3456-4576", "CC XXXX-XXXX-XXXX-XXXX"),
("CC 1234234534654576", "CC XXXXXXXXXXXXXXXX"),
("CC 1234,2345,3456,4576", "CC XXXX-XXXX-XXXX-XXXX"),
("SSN 123-45-6789", "SSN XXX-XX-XXXX"),
("SSN 123456789", "SSN XXXXXXXXX"),
("My password=Hello123", "My password=xxxxx"),
("Host www.cloudera.com", "Host HOSTNAME.REDACTED"),
("www.c1-foo.org rules!", "HOSTNAME.REDACTED rules!"),
("IP1 8.8.8.8", "IP1 0.0.0.0"),
("IP2 192.168.0.1", "IP2 0.0.0.0"),
("My email is [email protected]", "My email is [email protected]"),
("[email protected] is interesting", "[email protected] is interesting"),
("Multi 1234-2345-3456-4567\nLine 123-45-6789", "Multi XXXX-XXXX-XXXX-XXXX\nLine XXX-XX-XXXX"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_huge_rules(self):
path = get_path('huge-1.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("This string is not redadted", policy.redact(MESSAGE))
def test_back_refs(self):
path = get_path('replace-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("1234-2345-3456-4576", "XXXX-XXXX-XXXX-4576"),
("Words www.gmail.com is cool", "Words HOSTNAME.REDACTED.com is cool"),
("short.org", "HOSTNAME.REDACTED.org"),
("long.n4me.h-1.co.fr", "HOSTNAME.REDACTED.fr"),
("Ping 192.168.0.1", "Ping 0.192.1.168"),
("Magic word", "word: Magic word, word"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_ordering(self):
path = get_path('ordering-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("one", "four"),
("This one is a nice one", "This four is a nice four"),
("Please help me: ten", "Please help me: thirteen"),
("HappY abc", "HappY stu"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_case_sensitivity(self):
path = get_path('case-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("Say aAa! aaa! AAAAAA!", "Say bbb! bbb! bbbbbb!"),
("I like dddogs. dDd", "I like dddogs. dDd"),
("Cccats. Dddogs", "Cccats. eEeogs"),
("Trigger fff gGg", "Trigger fff gGg"),
("Trigger fFf Ggg", "Trigger fFf Ggg"),
("Trigger fFf gGg", "Trigger fFf hHh"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_multithreading(self):
path = get_path('numbers.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("asdf####fdas### H#ll# w#rld", policy.redact("asdf1234fdas666 H3ll0 w0rld"))
errors = []
lock = threading.Lock()
regex = re.compile(r"[0-9]")
class TestThread(threading.Thread):
def run(self):
for i in xrange(500):
message = u''.join(random_utf8_char() for _ in xrange(128))
redacted_message = policy.redact(message)
if regex.search(redacted_message):
with lock:
errors.append((message, redacted_message))
break
threads = []
for i in xrange(10):
threads.append(TestThread())
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert_equal(errors, [])
def byte_range(first, last):
return list(range(first, last+1))
first_values = byte_range(0x00, 0x7F) + byte_range(0xC2, 0xF4)
trailing_values = byte_range(0x80, 0xBF)
def random_utf8_char():
first = random.choice(first_values)
if first <= 0x7F:
value = bytearray([first])
elif first <= 0xDF:
value = bytearray([first, random.choice(trailing_values)])
elif first == 0xE0:
value = bytearray([first, random.choice(byte_range(0xA0, 0xBF)), random.choice(trailing_values)])
elif first == 0xED:
value = bytearray([first, random.choice(byte_range(0x80, 0x9F)), random.choice(trailing_values)])
elif first <= 0xEF:
value = bytearray([first, random.choice(trailing_values), random.choice(trailing_values)])
elif first == 0xF0:
value = bytearray([first, random.choice(byte_range(0x90, 0xBF)), random.choice(trailing_values), random.choice(trailing_values)])
elif first <= 0xF3:
value = bytearray([first, random.choice(trailing_values), random.choice(trailing_values), random.choice(trailing_values)])
elif first == 0xF4:
value = bytearray([first, random.choice(byte_range(0x80, 0x8F)), random.choice(trailing_values), random.choice(trailing_values)])
return value.decode('utf8')
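# Note (added): random_utf8_char() pairs each valid UTF-8 lead byte with the
# continuation ranges allowed for it (RFC 3629); the special cases for 0xE0,
# 0xED, 0xF0 and 0xF4 avoid overlong encodings, surrogates and code points
# above U+10FFFF, so the decode('utf8') call never fails.
# Hypothetical usage sketch:
#
#   sample = u''.join(random_utf8_char() for _ in xrange(16))
#   assert sample == sample.encode('utf8').decode('utf8')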
| apache-2.0 |
mtbc/openmicroscopy | components/tools/OmeroPy/src/omero/plugins/cecog.py | 11 | 6684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
    Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
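    # Example (added, illustrative only): for a MetaMorph-style name such as
    # "tubulin_P0037_T00005_Cgfp_Z1_S1.tiff" the patterns above give
    #   regex_time.search(name).group('T')    -> "00005"
    #   regex_channel.search(name).group('C') -> "gfp"
    #   regex_zslice.search(name).group('Z')  -> "1"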
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
        It should be run as a local script (not via the scripting service) so that
        it has access to the local user's file system. It therefore needs EMAN2 or
        PIL installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
        else:
            p = path
            if os.path.basename(path) == "":
                p = path[:-1]  # remove trailing slash
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
self.ctx.die(654, "Could find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
| gpl-2.0 |
yordan-desta/QgisIns | python/plugins/processing/gui/ScriptEditorDialog.py | 2 | 7417 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptDialog.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.modeler.ModelerUtils import ModelerUtils
__author__ = 'Alexander Bruy'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import sys
import json
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qsci import *
from qgis.core import *
from qgis.utils import iface
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.HelpEditionDialog import HelpEditionDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.algs.r.RUtils import RUtils
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.script.ScriptUtils import ScriptUtils
from processing.ui.ui_DlgScriptEditor import Ui_DlgScriptEditor
import processing.resources_rc
class ScriptEditorDialog(QDialog, Ui_DlgScriptEditor):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
hasChanged = False
def __init__(self, algType, alg):
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlags(Qt.WindowMinimizeButtonHint |
Qt.WindowMaximizeButtonHint |
Qt.WindowCloseButtonHint)
# Set icons
self.btnSave.setIcon(
QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnSaveAs.setIcon(
QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
self.btnEditHelp.setIcon(QIcon(':/processing/images/edithelp.png'))
self.btnRun.setIcon(QIcon(':/processing/images/runalgorithm.png'))
self.btnCut.setIcon(QgsApplication.getThemeIcon('/mActionEditCut.png'))
self.btnCopy.setIcon(
QgsApplication.getThemeIcon('/mActionEditCopy.png'))
self.btnPaste.setIcon(
QgsApplication.getThemeIcon('/mActionEditPaste.png'))
self.btnUndo.setIcon(QgsApplication.getThemeIcon('/mActionUndo.png'))
self.btnRedo.setIcon(QgsApplication.getThemeIcon('/mActionRedo.png'))
# Connect signals and slots
self.btnSave.clicked.connect(self.save)
self.btnSaveAs.clicked.connect(self.saveAs)
self.btnEditHelp.clicked.connect(self.editHelp)
self.btnRun.clicked.connect(self.runAlgorithm)
self.btnCut.clicked.connect(self.editor.cut)
self.btnCopy.clicked.connect(self.editor.copy)
self.btnPaste.clicked.connect(self.editor.paste)
self.btnUndo.clicked.connect(self.editor.undo)
self.btnRedo.clicked.connect(self.editor.redo)
self.editor.textChanged.connect(lambda: self.setHasChanged(True))
self.alg = alg
self.algType = algType
if self.alg is not None:
self.filename = self.alg.descriptionFile
self.editor.setText(self.alg.script)
else:
self.filename = None
self.update = False
self.help = None
self.setHasChanged(False)
self.editor.setLexerType(self.algType)
def editHelp(self):
if self.alg is None:
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
elif self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
else:
alg = self.alg
dlg = HelpEditionDialog(alg)
dlg.exec_()
        # We store the description strings in case they were not saved
# because there was no filename defined yet
if self.alg is None and dlg.descriptions:
self.help = dlg.descriptions
def save(self):
self.saveScript(False)
def saveAs(self):
self.saveScript(True)
def saveScript(self, saveAs):
if self.filename is None or saveAs:
if self.algType == self.SCRIPT_PYTHON:
scriptDir = ScriptUtils.scriptsFolder()
filterName = self.tr('Python scripts (*.py)')
elif self.algType == self.SCRIPT_R:
scriptDir = RUtils.RScriptsFolder()
filterName = self.tr('Processing R script (*.rsx)')
self.filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save script'), scriptDir,
filterName))
if self.filename:
if self.algType == self.SCRIPT_PYTHON \
and not self.filename.lower().endswith('.py'):
self.filename += '.py'
if self.algType == self.SCRIPT_R \
and not self.filename.lower().endswith('.rsx'):
self.filename += '.rsx'
text = unicode(self.editor.text())
if self.alg is not None:
self.alg.script = text
try:
with codecs.open(self.filename, 'w', encoding='utf-8') as fout:
fout.write(text)
except IOError:
QMessageBox.warning(self, self.tr('I/O error'),
self.tr('Unable to save edits. Reason:\n %s')
% unicode(sys.exc_info()[1]))
return
self.update = True
# If help strings were defined before saving the script for
# the first time, we do it here
if self.help:
with open(self.filename + '.help', 'w') as f:
json.dump(self.help, f)
self.help = None
self.setHasChanged(False)
else:
self.filename = None
def setHasChanged(self, hasChanged):
self.hasChanged = hasChanged
self.btnSave.setEnabled(hasChanged)
def runAlgorithm(self):
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['script']
if self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['r']
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
| gpl-2.0 |
miki725/django-sub-query | sub_query/db/models/sql/compiler.py | 2 | 2389 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from django.contrib.gis.db.models.sql.compiler import * # noqa
class SubQueryGeoSQLCompiler(GeoSQLCompiler):
def __init__(self, *args, **kwargs):
super(SubQueryGeoSQLCompiler, self).__init__(*args, **kwargs)
self.is_subquery = False
def get_ordering(self):
if hasattr(self, '_get_ordering'):
values = self._get_ordering
else:
values = self._get_ordering = super(SubQueryGeoSQLCompiler, self).get_ordering()
ordering, o_params, ordering_group_by = values
if self.is_subquery:
ordering = []
return ordering, o_params, ordering_group_by
def pre_sql_setup(self):
if hasattr(self, '_pre_sql_setup'):
return self._pre_sql_setup
self._pre_sql_setup = super(SubQueryGeoSQLCompiler, self).pre_sql_setup()
return self._pre_sql_setup
def get_columns(self, with_aliases=False):
if hasattr(self, '_get_columns'):
return self._get_columns
self._get_columns = super(SubQueryGeoSQLCompiler, self).get_columns(with_aliases)
return self._get_columns
def as_sql(self, with_limits=True, with_col_aliases=False):
# these calls are required in order to get ordering columns
self.pre_sql_setup()
self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
self.is_subquery = False
if self.query.distinct and ordering:
distinct_ordering_pairs = list(zip(distinct_fields, ordering))
if not all(map(lambda i: i[1].startswith(i[0]), distinct_ordering_pairs)):
self.is_subquery = True
sql, params = super(SubQueryGeoSQLCompiler, self).as_sql(
with_limits=with_limits, with_col_aliases=with_col_aliases
)
if self.is_subquery:
sql = ' '.join(filter(None, [
'SELECT',
'*',
'FROM (',
'{}'.format(sql),
')',
'"{}"'.format(self.query.model._meta.db_table),
'ORDER BY',
'{}'.format(', '.join(ordering)),
] + o_params))
self.is_subquery = False
return sql, params
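        # Note (added): when the DISTINCT columns do not prefix the ORDER BY
        # columns, the statement built above is wrapped so that ordering is
        # applied to the already-deduplicated rows; schematically
        # (illustrative SQL only):
        #
        #   SELECT * FROM ( <original query> ) "app_model" ORDER BY <ordering>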
| mit |
luisgg/iteexe | exe/export/singlepage.py | 1 | 6261 | # ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
This class transforms an eXe node into a page on a single page website
"""
import logging
import re
from cgi import escape
from urllib import quote
from exe.webui.blockfactory import g_blockFactory
from exe.engine.error import Error
from exe.engine.path import Path
from exe.export.pages import Page, uniquifyNames
from exe.webui import common
from exe import globals as G
log = logging.getLogger(__name__)
# ===========================================================================
class SinglePage(Page):
"""
This class transforms an eXe node into a page on a single page website
"""
def save(self, filename, for_print=0):
"""
Save page to a file.
'outputDir' is the directory where the filenames will be saved
(a 'path' instance)
"""
outfile = open(filename, "wb")
outfile.write(self.render(self.node.package,for_print).encode('utf8'))
outfile.close()
def render(self, package, for_print=0):
"""
Returns an XHTML string rendering this page.
"""
html = self.renderHeader(package.title, for_print)
if for_print:
# include extra onload bit:
html += u'<body onload="print_page()">\n'
else:
html += u"<body>\n"
html += u"<div id=\"content\">\n"
html += u"<div id=\"header\">\n"
html += "<h1>"+escape(package.title)+"</h1>"
html += u"</div>\n"
html += u"<div id=\"main\">\n"
html += self.renderNode(package.root, 1)
html += u"</div>\n"
html += self.renderLicense()
html += self.renderFooter()
html += u"</div>\n"
html += u"</body></html>\n"
        # JR: Remove the equation attributes
aux = re.compile("exe_math_latex=\"[^\"]*\"")
html = aux.sub("", html)
aux = re.compile("exe_math_size=\"[^\"]*\"")
html = aux.sub("", html)
        # JR: Change the path of the glossary links and the &
html = html.replace("../../../../../mod/glossary", "../../../../mod/glossary")
html = html.replace("&concept", "&concept")
return html
def renderHeader(self, name, for_print=0):
"""
Returns an XHTML string for the header of this page.
"""
html = u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
html += u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
html += u'Transitional//EN" '
html += u'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
lenguaje = G.application.config.locale
html += u"<html lang=\"" + lenguaje + "\" xml:lang=\"" + lenguaje + "\" xmlns=\"http://www.w3.org/1999/xhtml\">\n"
html += u"<head>\n"
html += u"<style type=\"text/css\">\n"
html += u"@import url(base.css);\n"
html += u"@import url(content.css);\n"
html += u"</style>"
html += u"<title>"
html += name
html += "</title>\n"
html += u"<meta http-equiv=\"Content-Type\" content=\"text/html; "
html += u" charset=utf-8\" />\n";
html += u'<script type="text/javascript" src="common.js"></script>\n'
if for_print:
# include extra print-script for onload bit
html += u'<script type="text/javascript">\n'
html += u'function print_page() {\n'
html += u' window.print();\n'
html += u' window.close();\n'
html += u'}\n'
html += u'</script>\n'
html += u"</head>\n"
return html
    # JR: Modified this function so that it emits an hX heading for each node
def renderNode(self, node, nivel):
"""
Returns an XHTML string for this node and recurse for the children
"""
html = ""
html += '<div class="node">\n'
html += ' <div class=\"nodeDecoration\">'
html += '<h' + str(nivel) + ' class=\"nodeTitle\">'
html += escape(node.titleLong)
html += '</h' + str(nivel) + '></div>\n'
style = self.node.package.style
for idevice in node.idevices:
html += u' <div class="%s" id="id%s">\n' % (idevice.klass,
idevice.id)
block = g_blockFactory.createBlock(None, idevice)
if not block:
log.critical("Unable to render iDevice.")
raise Error("Unable to render iDevice.")
if hasattr(idevice, "isQuiz"):
html += block.renderJavascriptForWeb()
html += self.processInternalLinks(block.renderView(style))
html += u' </div>\n' # iDevice div
html += '</div>\n' # node div
for child in node.children:
html += self.renderNode(child, nivel+1)
return html
def processInternalLinks(self, html):
"""
take care of any internal links which are in the form of:
href="exe-node:Home:Topic:etc#Anchor"
For this SinglePage Export, go ahead and keep the #Anchor portion,
but remove the 'exe-node:Home:Topic:etc', since it is all
exported into the same file.
"""
return common.removeInternalLinkNodes(html)
| gpl-2.0 |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_icd10/__init__.py | 1 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# GNU Health: The Free Health and Hospital Information System
# Copyright (C) 2008-2016 Luis Falcon <[email protected]>
# Copyright (C) 2011-2016 GNU Solidario <[email protected]>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from health_icd10 import *
| gpl-3.0 |
weynsee/chompy | chompy/chom.py | 1 | 10122 | # Copyright 2009 Wayne See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import server
import appuifw
import e32
import chm_filebrowser
import os
import e32dbm
CONF_FILE = u"E:\\Data\\chompy\\chompy.cfg"
INIT_FILE = u"E:\\Data\\chompy\\online.html"
LOCAL_FILE = u"E:\\Data\\chompy\\offline.html"
SEPARATOR = u"/"
INIT_HTML = u"""<html>
<body>
<script type="text/javascript">
location.replace("http://localhost:""" + unicode(server.PORT) + """/%s")
</script>
</body>
</html>
"""
ERROR_TEMPLATE = """<html>
<body>
%s
</body>
</html>
"""
ERR_READING = u"CHM File cannot be read"
ERR_NO_HHC = u"CHM File contains no HHC file"
if not os.path.exists("E:\\Data\\chompy"):
os.makedirs("E:\\Data\\chompy")
class Chompy:
def __init__(self):
self.app_lock = e32.Ao_lock()
self.fb = chm_filebrowser.Filebrowser()
self.load_recent()
self.hhc_callback = e32.ao_callgate(self.load_hhc_viewer)
def load_recent(self):
try:
db = e32dbm.open(CONF_FILE, "c")
recent = db["recent"]
if recent:
self.recent = recent.split(SEPARATOR)
else:
self.recent = []
db.close()
except:
self.recent = []
def save_recent(self):
db = e32dbm.open(CONF_FILE, "wf")
try:
db["recent"] = SEPARATOR.join(self.recent)
finally:
db.close()
def browse(self):
self.fb.show()
selected = self.fb.selected
if selected:
file = unicode(selected)
if file not in self.recent:
self.recent.append(file)
self.update_list(len(self.recent) - 1)
self.open(file)
else:
self.refresh()
def to_display(self, filename):
return unicode(os.path.basename(filename))
def update_list(self, selected_index=None):
if self.recent:
self.lb.set_list(self.get_list(), selected_index)
else:
self.lb.set_list(self.get_list())
def get_list(self):
if self.recent:
return map(self.to_display, self.recent)
else:
return [u"Select file"]
def lb_observe(self, index=None):
if index is None:
index = self.lb.current()
if not self.recent:
self.browse()
else:
self.open(self.recent[index])
def open(self, filename=None):
if filename is None:
filename = self.recent[self.lb.current()]
res = appuifw.popup_menu([u"Offline Mode", u"Online Mode"])
if res == 0:
self.open_offline(filename)
elif res == 1:
self.open_online(filename)
def open_online(self, filename):
server.start(filename, self.hhc_callback)
stall()
def open_offline(self, filename):
stall()
e32.ao_yield()
import pychmlib
try:
chm_file = pychmlib.chm.chm(filename)
except:
appuifw.note(ERR_READING, "error")
self.refresh()
return
try:
hhc_file = chm_file.get_hhc()
if hhc_file:
import hhc
hhc_obj = hhc.parse(hhc_file.get_content())
viewer = HHCViewer(filename, hhc_obj, chm_file.encoding)
viewer.set_as_offline(chm_file)
viewer.show()
self.quit()
else:
appuifw.note(ERR_NO_HHC, "error")
self.refresh()
return
finally:
chm_file.close()
def load_hhc_viewer(self, filename=None, contents=None, encoding=None, error=None):
if not error:
viewer = HHCViewer(filename, contents, encoding)
viewer.show()
server.stop() #if there is an error, no need to stop server
self.exit_screen()
else:
if error == server.ERR_INVALID_CHM:
appuifw.note(ERR_READING, "error")
elif error == server.ERR_NO_HHC:
appuifw.note(ERR_NO_HHC, "error")
self.refresh()
def remove(self):
index = self.lb.current()
del self.recent[index]
self.update_list(index)
def quit(self):
self.save_recent()
self.app_lock.signal()
def refresh(self):
menu_list = [(u"Browse for file", self.browse), (u"Exit", self.quit)]
if self.recent:
menu_list.insert(0, (u"Open", self.open))
menu_list.insert(2, (u"Remove", self.remove))
appuifw.app.menu = menu_list
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = u"chompy"
appuifw.app.body = self.lb
def exit_screen(self):
appuifw.app.menu = []
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = u"chompy"
text = appuifw.Text()
text.set(u"Application can now be safely closed.")
appuifw.app.body = text
def show(self):
self.lb = appuifw.Listbox(self.get_list(), self.lb_observe)
self.refresh()
self.app_lock.wait()
self.lb = None
self.hhc_callback = None
self.fb = None
appuifw.app.body = None
appuifw.app.exit_key_handler = None
class HHCViewer:
def __init__(self, filename, hhc_obj, encoding):
self.title = os.path.basename(filename)
self.chm_file = None
self.current_context = hhc_obj
self.encoding = encoding
self.app_lock = e32.Ao_lock()
def set_as_offline(self, chm_file):
self.chm_file = chm_file
def to_displayable_list(self):
entries = map(lambda x: x.name.decode(self.encoding), self.current_context.children)
if not self.current_context.is_root:
entries.insert(0, u"..")
return entries
def lb_observe(self, index=None):
if index is None:
index = self.lb.current()
if index == 0 and not self.current_context.is_root:
#go back up
selected = self.current_context.parent
else:
selected_index = index
if not self.current_context.is_root:
selected_index -= 1
selected = self.current_context.children[selected_index]
if selected.is_inner_node:
if selected.local:
res = appuifw.popup_menu([u"Load page", u"List contents"])
if res == 0:
self.load_in_viewer(selected.local)
elif res == 1:
self.load_directory(selected)
else:
self.load_directory(selected)
else:
self.load_in_viewer(selected.local)
def load_directory(self, entry):
self.current_context = entry
entries = self.to_displayable_list()
self.lb.set_list(entries)
def load_in_viewer(self, local):
if self.chm_file:
self.load_offline(local)
else:
self.load_online(local)
def load_online(self, local):
self.open_local_html(INIT_FILE, INIT_HTML % local)
def open_local_html(self, filename, content):
html_file = open(filename, "wb")
try:
html_file.write(content)
finally:
html_file.close()
browser_lock = e32.Ao_lock()
viewer = appuifw.Content_handler(browser_lock.signal)
viewer.open(filename)
browser_lock.wait()
def load_offline(self, local):
stall(u"Please wait while page is extracted from the archive...")
e32.ao_yield()
ui = self.chm_file.resolve_object("/"+local)
try:
if ui:
content = ui.get_content()
else:
content = ERROR_TEMPLATE % "Page cannot be found"
except:
content = ERROR_TEMPLATE % "Page could not be displayed"
try:
self.open_local_html(LOCAL_FILE, content)
self.refresh()
except:
self.refresh()
def quit(self):
appuifw.app.exit_key_handler = None
self.app_lock.signal()
def open(self):
self.lb_observe()
def refresh(self):
appuifw.app.menu = [(u"Open", self.open), (u"Exit", self.quit)]
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = self.title
appuifw.app.body = self.lb
def show(self):
entries = self.to_displayable_list()
self.lb = appuifw.Listbox(entries, self.lb_observe)
self.refresh()
self.app_lock.wait()
self.lb = None
appuifw.app.body = None
def stall(msg = u"Please wait while CHM file is being read..."):
appuifw.app.menu = []
appuifw.app.title = u"Loading..."
text = appuifw.Text()
text.style = appuifw.STYLE_ITALIC
text.set(msg)
appuifw.app.body = text
appuifw.app.exit_key_handler = stop_quit
def stop_quit():
appuifw.note(u"Cannot exit until process has finished", "info")
if __name__ == '__main__':
Chompy().show() | apache-2.0 |
TaintTrap/platform_external_chromium | testing/gtest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
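# Note (added): this parsing assumes gtest_shuffle_test_ prints a dashed
# separator line ('----') before each iteration and one 'TestCaseName.TestName'
# per line after it, for example:
#
#   ----
#   TestCaseA.TestOne
#   TestCaseA.TestTwo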
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
cubarco/tunasync | tunasync/jobs.py | 1 | 4053 | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import sh
import sys
from setproctitle import setproctitle
import signal
import Queue
import traceback
def run_job(sema, child_q, manager_q, provider, **settings):
aquired = False
setproctitle("tunasync-{}".format(provider.name))
def before_quit(*args):
provider.terminate()
if aquired:
print("{} release semaphore".format(provider.name))
sema.release()
sys.exit(0)
def sleep_wait(timeout):
try:
msg = child_q.get(timeout=timeout)
if msg == "terminate":
manager_q.put(("CONFIG_ACK", (provider.name, "QUIT")))
return True
except Queue.Empty:
return False
signal.signal(signal.SIGTERM, before_quit)
if provider.delay > 0:
if sleep_wait(provider.delay):
return
max_retry = settings.get("max_retry", 1)
def _real_run(idx=0, stage="job_hook", ctx=None):
"""\
4 stages:
0 -> job_hook, 1 -> set_retry, 2 -> exec_hook, 3 -> exec
"""
assert(ctx is not None)
if stage == "exec":
# exec_job
try:
provider.run(ctx=ctx)
provider.wait()
except sh.ErrorReturnCode:
status = "fail"
else:
status = "success"
return status
elif stage == "set_retry":
# enter stage 3 with retry
for retry in range(max_retry):
status = "syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
print("start syncing {}, retry: {}".format(provider.name, retry))
status = _real_run(idx=0, stage="exec_hook", ctx=ctx)
if status == "success":
break
return status
# job_hooks
elif stage == "job_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="set_retry", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_job, hook.after_job
status = "pre-syncing"
elif stage == "exec_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="exec", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_exec, hook.after_exec
status = "syncing"
try:
# print("%s run before_%s, %d" % (provider.name, stage, idx))
hook_before(provider=provider, ctx=ctx)
status = _real_run(idx=idx+1, stage=stage, ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
# print("%s run after_%s, %d" % (provider.name, stage, idx))
# job may break when syncing
if status != "success":
status = "fail"
try:
hook_after(provider=provider, status=status, ctx=ctx)
except Exception:
traceback.print_exc()
return status
while 1:
try:
sema.acquire(True)
except:
break
aquired = True
ctx = {} # put context info in it
ctx['current_dir'] = provider.local_dir
ctx['mirror_name'] = provider.name
status = "pre-syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
try:
status = _real_run(idx=0, stage="job_hook", ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
sema.release()
aquired = False
print("syncing {} finished, sleep {} minutes for the next turn".format(
provider.name, provider.interval
))
manager_q.put(("UPDATE", (provider.name, status, ctx)))
if sleep_wait(timeout=provider.interval * 60):
break
# vim: ts=4 sw=4 sts=4 expandtab
| gpl-3.0 |
SickGear/SickGear | lib/hachoir_py2/parser/archive/cab.py | 2 | 11532 | """
Microsoft Cabinet (CAB) archive.
Author: Victor Stinner, Robert Xiao
Creation date: 31 january 2007
- Microsoft Cabinet SDK
http://msdn2.microsoft.com/en-us/library/ms974336.aspx
"""
from __future__ import absolute_import
from hachoir_py2.parser import Parser
from hachoir_py2.field import (FieldSet, Enum,
CString, String,
UInt8, UInt16, UInt32, Bit, Bits, PaddingBits, NullBits,
DateTimeMSDOS32, RawBytes, CustomFragment)
from hachoir_py2.core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir_py2.core.endian import LITTLE_ENDIAN
from hachoir_py2.core.tools import paddingSize
from hachoir_py2.stream import StringInputStream
from hachoir_py2.parser.archive.lzx import LZXStream, lzx_decompress
from hachoir_py2.parser.archive.zlib import DeflateBlock
MAX_NB_FOLDER = 30
COMPRESSION_NONE = 0
COMPRESSION_NAME = {
0: "Uncompressed",
1: "Deflate",
2: "Quantum",
3: "LZX",
}
class Folder(FieldSet):
def createFields(self):
yield UInt32(self, "offset", "Offset to data (from file start)")
yield UInt16(self, "data_blocks", "Number of data blocks which are in this cabinet")
yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME)
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
yield PaddingBits(self, "padding[]", 4)
yield Bits(self, "compr_level", 5, "Compression level")
yield PaddingBits(self, "padding[]", 3)
else:
yield PaddingBits(self, "padding[]", 12)
if self["../flags/has_reserved"].value and self["../reserved_folder_size"].value:
yield RawBytes(self, "reserved_folder", self["../reserved_folder_size"].value, "Per-folder reserved area")
def createDescription(self):
text = "Folder: compression %s" % self["compr_method"].display
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
text += " (level %u: window size %u)" % (self["compr_level"].value, 2 ** self["compr_level"].value)
return text
class CabFileAttributes(FieldSet):
def createFields(self):
yield Bit(self, "readonly")
yield Bit(self, "hidden")
yield Bit(self, "system")
yield Bits(self, "reserved[]", 2)
yield Bit(self, "archive", "Has the file been modified since the last backup?")
yield Bit(self, "exec", "Run file after extraction?")
yield Bit(self, "name_is_utf", "Is the filename using UTF-8?")
yield Bits(self, "reserved[]", 8)
class File(FieldSet):
def createFields(self):
yield filesizeHandler(UInt32(self, "filesize", "Uncompressed file size"))
yield UInt32(self, "folder_offset", "File offset in uncompressed folder")
yield Enum(UInt16(self, "folder_index", "Containing folder ID (index)"), {
0xFFFD: "Folder continued from previous cabinet (real folder ID = 0)",
0xFFFE: "Folder continued to next cabinet (real folder ID = %i)" % (self["../nb_folder"].value - 1),
0xFFFF: "Folder spanning previous, current and next cabinets (real folder ID = 0)"})
yield DateTimeMSDOS32(self, "timestamp")
yield CabFileAttributes(self, "attributes")
if self["attributes/name_is_utf"].value:
yield CString(self, "filename", charset="UTF-8")
else:
yield CString(self, "filename", charset="ASCII")
def createDescription(self):
return "File %s (%s)" % (
self["filename"].display, self["filesize"].display)
class Flags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "has_previous")
yield Bit(self, "has_next")
yield Bit(self, "has_reserved")
yield NullBits(self, "padding", 13)
class DataBlock(FieldSet):
def __init__(self, *args, **kwargs):
FieldSet.__init__(self, *args, **kwargs)
size = (self["size"].value + 8) * 8 # +8 for header values
if self["/flags/has_reserved"].value:
size += self["/reserved_data_size"].value * 8
self._size = size
def createFields(self):
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt16(self, "size")
yield UInt16(self, "uncompressed_size", "If this is 0, this block is continued in a subsequent cabinet")
if self["/flags/has_reserved"].value and self["/reserved_data_size"].value:
yield RawBytes(self, "reserved_data", self["/reserved_data_size"].value, "Per-datablock reserved area")
compr_method = self.parent.folder["compr_method"].value
if compr_method == 0: # Uncompressed
yield RawBytes(self, "data", self["size"].value, "Folder Data")
self.parent.uncompressed_data += self["data"].value
elif compr_method == 1: # MSZIP
yield String(self, "mszip_signature", 2, "MSZIP Signature (CK)")
yield DeflateBlock(self, "deflate_block", self.parent.uncompressed_data)
padding = paddingSize(self.current_size, 8)
if padding:
yield PaddingBits(self, "padding[]", padding)
self.parent.uncompressed_data = self["deflate_block"].uncomp_data
elif compr_method == 2: # Quantum
yield RawBytes(self, "compr_data", self["size"].value, "Compressed Folder Data")
elif compr_method == 3: # LZX
group = getattr(self.parent.folder, "lzx_group", None)
field = CustomFragment(self, "data", self["size"].value * 8, LZXStream, "LZX data fragment", group)
if group is None:
field.group.args["compr_level"] = self.parent.folder["compr_level"].value
self.parent.folder.lzx_group = field.group
yield field
class FolderParser(Parser):
endian = LITTLE_ENDIAN
def createFields(self):
for file in sorted(self.files, key=lambda x: x["folder_offset"].value):
padding = self.seekByte(file["folder_offset"].value)
if padding:
yield padding
yield RawBytes(self, "file[]", file["filesize"].value, file.description)
class FolderData(FieldSet):
def __init__(self, parent, name, folder, files, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
tags = args.setdefault("tags", [])
tags.extend(stream.tags)
tags.append(("class", FolderParser))
tags.append(("args", {'files': files}))
for unused in self:
pass
if folder["compr_method"].value == 3: # LZX
self.uncompressed_data = lzx_decompress(self["block[0]/data"].getSubIStream(),
folder["compr_level"].value)
return StringInputStream(self.uncompressed_data, source=source, **args)
self.setSubIStream(createInputStream)
self.files = files
self.folder = folder # Folder fieldset
def createFields(self):
self.uncompressed_data = ""
for index in xrange(self.folder["data_blocks"].value):
block = DataBlock(self, "block[]")
for i in block:
pass
yield block
class CabFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = "MSCF"
PARSER_TAGS = {
"id": "cab",
"category": "archive",
"file_ext": ("cab",),
"mime": (u"application/vnd.ms-cab-compressed",),
"magic": ((MAGIC, 0),),
"min_size": 1 * 8, # header + file entry
"description": "Microsoft Cabinet archive"
}
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["major_version"].value != 1 or self["minor_version"].value != 3:
return "Unknown version (%i.%i)" % (self["major_version"].value, self["minor_version"].value)
if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER):
return "Invalid number of folder (%s)" % self["nb_folder"].value
return True
def createFields(self):
yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII")
yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal)
yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size"))
yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal)
yield UInt32(self, "off_file", "Offset of first file")
yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal)
yield UInt8(self, "minor_version", "Minor version (should be 3)")
yield UInt8(self, "major_version", "Major version (should be 1)")
yield UInt16(self, "nb_folder", "Number of folders")
yield UInt16(self, "nb_files", "Number of files")
yield Flags(self, "flags")
yield UInt16(self, "setid")
yield UInt16(self, "cabinet_serial", "Zero-based cabinet number")
if self["flags/has_reserved"].value:
yield UInt16(self, "reserved_header_size", "Size of per-cabinet reserved area")
yield UInt8(self, "reserved_folder_size", "Size of per-folder reserved area")
yield UInt8(self, "reserved_data_size", "Size of per-datablock reserved area")
if self["reserved_header_size"].value:
yield RawBytes(self, "reserved_header", self["reserved_header_size"].value, "Per-cabinet reserved area")
if self["flags/has_previous"].value:
yield CString(self, "previous_cabinet", "File name of previous cabinet", charset="ASCII")
yield CString(self, "previous_disk", "Description of disk/media on which previous cabinet resides",
charset="ASCII")
if self["flags/has_next"].value:
yield CString(self, "next_cabinet", "File name of next cabinet", charset="ASCII")
yield CString(self, "next_disk", "Description of disk/media on which next cabinet resides", charset="ASCII")
folders = []
files = []
for index in xrange(self["nb_folder"].value):
folder = Folder(self, "folder[]")
yield folder
folders.append(folder)
for index in xrange(self["nb_files"].value):
file = File(self, "file[]")
yield file
files.append(file)
folders = sorted(enumerate(folders), key=lambda x: x[1]["offset"].value)
for i in xrange(len(folders)):
index, folder = folders[i]
padding = self.seekByte(folder["offset"].value)
if padding:
yield padding
            # Collect only the files stored in this folder; use a separate
            # list so the full file list is not shadowed.
            folder_files = []
            for file in files:
                if file["folder_index"].value == index:
                    folder_files.append(file)
if i + 1 == len(folders):
size = (self.size // 8) - folder["offset"].value
else:
size = (folders[i + 1][1]["offset"].value) - folder["offset"].value
yield FolderData(self, "folder_data[%i]" % index, folder, files, size=size * 8)
end = self.seekBit(self.size, "endraw")
if end:
yield end
def createContentSize(self):
return self["filesize"].value * 8
| gpl-3.0 |
topliceanu/learn | python/python_koans/python2/koans/about_lists.py | 1 | 3314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(0, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums, 'empty lists should equal')
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertEqual([1, 2], nums)
nums.append(333)
self.assertEqual([1, 2, 333], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual('peanut', noms[0])
self.assertEqual('jelly', noms[3])
self.assertEqual('jelly', noms[-1])
self.assertEqual('butter', noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['peanut'], noms[0:1])
self.assertEqual(['peanut', 'butter'], noms[0:2])
self.assertEqual([], noms[2:2])
self.assertEqual(['and', 'jelly'], noms[2:20])
self.assertEqual([], noms[4:0])
self.assertEqual([], noms[4:100])
self.assertEqual([], noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['and', 'jelly'], noms[2:])
self.assertEqual(['peanut', 'butter'], noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(list, type(range(5)))
self.assertEqual([0, 1, 2, 3, 4], range(5))
self.assertEqual([5, 6, 7, 8], range(5, 9))
def test_ranges_with_steps(self):
self.assertEqual([0, 2, 4, 6], range(0, 8, 2))
self.assertEqual([1, 4, 7], range(1, 8, 3))
self.assertEqual([5, 1, -3], range(5, -7, -4))
self.assertEqual([5, 1, -3, -7], range(5, -8, -4))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(['you', 'shall', 'not', 'pass'], knight)
knight.insert(0, 'Arthur')
self.assertEqual(['Arthur', 'you', 'shall', 'not', 'pass'], knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual([10, 20, 30, 40, 'last'], stack)
popped_value = stack.pop()
self.assertEqual('last', popped_value)
self.assertEqual([10, 20, 30, 40], stack)
popped_value = stack.pop(1)
self.assertEqual(20, popped_value)
self.assertEqual([10, 30, 40], stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
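    # A minimal illustrative sketch added for clarity (not part of the
    # original koans): append() plays the role of "push", so a plain list
    # already behaves as a LIFO stack.
    def test_append_and_pop_act_as_a_stack(self):
        stack = []
        stack.append('a')   # "push"
        stack.append('b')   # "push"
        self.assertEqual('b', stack.pop())  # last in, first out
        self.assertEqual(['a'], stack)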
def test_use_deques_for_making_queues(self):
from collections import deque
queue = deque([1, 2])
queue.append('last')
self.assertEqual([1, 2, 'last'], list(queue))
popped_value = queue.popleft()
self.assertEqual(1, popped_value)
self.assertEqual([2, 'last'], list(queue))
| mit |
ghevcoul/pycraft | pycraft/window.py | 1 | 10596 | # python imports
import math
# 3rd party imports
import pyglet.clock
import pyglet.graphics
import pyglet.window
from pyglet.gl import * # noqa
from pyglet.window import key, mouse
from pycraft.util import sectorize, cube_vertices, normalize
from pycraft.objects.block import get_block
from pycraft.configuration import ConfigurationLoader
# TICKS_PER_SEC = 60
# Convenience list of num keys.
NUMERIC_KEYS = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0
]
class Window(pyglet.window.Window):
def __init__(self, ticks_ps, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
self.set_world(None)
self.set_player(None)
self.ticks_per_second = ticks_ps
# The crosshairs at the center of the screen.
self.reticle = None
# The label that is displayed in the top left of the canvas.
self.game_info_label = pyglet.text.Label(
'', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
self.current_item_label = pyglet.text.Label(
'', font_name='Arial', font_size=18,
x=self.width - 10, y=10, anchor_x='right', anchor_y='bottom',
color=(0, 0, 0, 255))
# Whether or not the window exclusively captures the mouse.
self.set_exclusive_mouse(False)
# This call schedules the `update()` method to be called
# TICKS_PER_SEC. This is the main game event loop.
# pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
pyglet.clock.schedule_interval(self.update, 1.0 / self.ticks_per_second)
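        # Illustrative note (an assumed example value, not from the original
        # code): with ticks_ps=60 the interval above is 1.0 / 60 ~= 0.0167 s,
        # i.e. update() runs roughly every 16.7 ms as the main game loop.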
config_loader = ConfigurationLoader()
self.config_data = config_loader.load_configuration_file()
def set_world(self, world):
self.world = world
def set_player(self, player):
self.player = player
def set_exclusive_mouse(self, exclusive):
"""If `exclusive` is True, the game will capture the mouse, if False the
game will ignore the mouse.
"""
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def on_mouse_press(self, x, y, button, modifiers):
"""Called when a mouse button is pressed. See pyglet docs for button
        and modifier mappings.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
button : int
Number representing mouse button that was clicked. 1 = left button,
4 = right button.
modifiers : int
Number representing any modifying keys that were pressed when the
mouse button was clicked.
"""
if self.exclusive:
vector = self.player.get_sight_vector()
block, previous = self.world.hit_test(self.player.position, vector)
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
# ON OSX, control + left click = right click.
player_x, player_y, player_z = normalize(self.player.position)
if previous and self.player.block and \
previous != (player_x, player_y, player_z) and \
previous != (player_x, player_y - 1, player_z):
                    # make sure the block isn't in the player's head or feet
self.world.add_block(previous, get_block(self.player.block))
self.player.adjust_inventory(self.player.block)
elif button == pyglet.window.mouse.LEFT and block:
texture = self.world.objects[block]
if texture.hit_and_destroy():
self.world.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
"""Called when the player moves the mouse.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
dx, dy : float
The movement of the mouse.
"""
if self.exclusive:
m = 0.15
x, y = self.player.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.player.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
"""Called when the player presses a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == getattr(key, self.config_data['controls']['forward']):
self.player.strafe_forward()
elif symbol == getattr(key, self.config_data['controls']['backward']):
self.player.strafe_backward()
elif symbol == getattr(key, self.config_data['controls']['right']):
self.player.strafe_right()
elif symbol == getattr(key, self.config_data['controls']['left']):
self.player.strafe_left()
elif symbol == getattr(key, self.config_data['controls']['jump']):
self.player.jump()
elif symbol == getattr(key, self.config_data['controls']['down']):
self.player.strafe_down()
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == getattr(key, self.config_data['controls']['fly']):
self.player.fly()
elif symbol in NUMERIC_KEYS:
self.player.switch_inventory(symbol - NUMERIC_KEYS[0])
def on_key_release(self, symbol, modifiers):
"""Called when the player releases a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == getattr(key, self.config_data['controls']['forward']):
self.player.strafe_backward()
elif symbol == getattr(key, self.config_data['controls']['backward']):
self.player.strafe_forward()
elif symbol == getattr(key, self.config_data['controls']['left']):
self.player.strafe_right()
elif symbol == getattr(key, self.config_data['controls']['right']):
self.player.strafe_left()
elif symbol == getattr(key, self.config_data['controls']['jump']):
self.player.strafe_down()
elif symbol == getattr(key, self.config_data['controls']['down']):
self.player.strafe_up()
def on_resize(self, width, height):
"""Called when the window is resized to a new `width` and `height`."""
# label
self.game_info_label.y = height - 10
self.current_item_label.x = width - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(
4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def on_draw(self):
"""Called by pyglet to draw the canvas."""
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.world.start_shader()
self.world.batch.draw()
self.world.stop_shader()
self.draw_focused_block()
self.set_2d()
self.draw_labels()
self.draw_reticle()
def set_3d(self):
"""Configure OpenGL to draw in 3d."""
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.player.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.player.position
glTranslatef(-x, -y, -z)
def set_2d(self):
"""Configure OpenGL to draw in 2d."""
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def update(self, dt):
"""This method is scheduled to be called repeatedly by the pyglet
clock.
Parameters
----------
dt : float
The change in time since the last call.
"""
self.world.process_queue(self.ticks_per_second)
sector = sectorize(self.player.position)
if sector != self.world.sector:
self.world.change_sectors(self.world.sector, sector)
if self.world.sector is None:
self.world.process_entire_queue()
self.world.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in range(m):
self.player.update(dt / m, self.world.objects)
def draw_focused_block(self):
"""Draw black edges around the block that is currently under the
crosshairs.
"""
vector = self.player.get_sight_vector()
block = self.world.hit_test(self.player.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_labels(self):
"""Draw the label in the top left of the screen."""
x, y, z = self.player.position
self.game_info_label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.world._shown), len(self.world.objects))
self.game_info_label.draw()
self.current_item_label.text = self.player.block if self.player.block else "No items in inventory"
self.current_item_label.draw()
def draw_reticle(self):
"""Draw the crosshairs in the center of the screen."""
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
| mit |
jcfrank/myrepo | tests/test_git_config.py | 90 | 1229 | import os
import unittest
import git_config
def fixture(*paths):
"""Return a path relative to test/fixtures.
"""
return os.path.join(os.path.dirname(__file__), 'fixtures', *paths)
class GitConfigUnitTest(unittest.TestCase):
"""Tests the GitConfig class.
"""
def setUp(self):
"""Create a GitConfig object using the test.gitconfig fixture.
"""
config_fixture = fixture('test.gitconfig')
self.config = git_config.GitConfig(config_fixture)
def test_GetString_with_empty_config_values(self):
"""
Test config entries with no value.
[section]
empty
"""
val = self.config.GetString('section.empty')
self.assertEqual(val, None)
def test_GetString_with_true_value(self):
"""
Test config entries with a string value.
[section]
nonempty = true
"""
val = self.config.GetString('section.nonempty')
self.assertEqual(val, 'true')
def test_GetString_from_missing_file(self):
"""
Test missing config file
"""
config_fixture = fixture('not.present.gitconfig')
config = git_config.GitConfig(config_fixture)
val = config.GetString('empty')
self.assertEqual(val, None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
glorizen/nupic | examples/opf/clients/hotgym/prediction/one_gym/run.py | 21 | 5172 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
import nupic_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
"NOTE: You must run ./swarm.py before this, because model parameters\n"
"are required to run NuPIC.\n"
)
GYM_NAME = "rec-center-hourly" # or use "rec-center-every-15m-large"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
_METRIC_SPECS = (
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
def runIoThroughNupic(inputData, model, gymName, plot):
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_output.NuPICPlotOutput([gymName])
else:
output = nupic_output.NuPICFileOutput([gymName])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
counter = 0
for row in csvReader:
counter += 1
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
result.metrics = metricsManager.update(result)
if counter % 100 == 0:
print "Read %i lines..." % counter
print ("After %i records, 1-step altMAPE=%f" % (counter,
result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='altMAPE':steps=1:window=1000:"
"field=kw_energy_consumption"]))
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
output.write([timestamp], [consumption], [prediction])
inputFile.close()
output.close()
def runModel(gymName, plot=False):
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
| agpl-3.0 |
eternity-group/eternity | qa/rpc-tests/listtransactions.py | 1 | 10134 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(4, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
# rbf is disabled in Eternity Core
# self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
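        # Worked example (added for clarity, not part of the original test):
        # under BIP125 an input signals opt-in RBF when its nSequence is
        # below 0xfffffffe, so 0 or 0xfffffffd signal replaceability while
        # 0xfffffffe and 0xffffffff do not.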
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| mit |
wkfwkf/statsmodels | examples/run_all.py | 34 | 1740 | """run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
uncomment plt.show() to show all plot windows
"""
from __future__ import print_function
from statsmodels.compat import input
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', # 'example_rpy.py',
'example_ols.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
# time series
'tsa/ex_arma2.py', 'tsa/ex_dates.py']
if __name__ == '__main__':
#temporarily disable show
import matplotlib.pyplot as plt
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
msg = """Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """
cont = input(msg)
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print('\n\nExecuting example file', run_all_f)
print('-----------------------' + '-' * len(run_all_f))
exec(open(run_all_f).read())
except:
# f might be overwritten in the executed file
print('**********************' + '*' * len(run_all_f))
print('ERROR in example file', run_all_f)
print('**********************' + '*' * len(run_all_f))
if stop_on_error:
raise
# reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
| bsd-3-clause |
kavasoglu/ocl_web | ocl_web/apps/collections/views.py | 1 | 33279 | """
OCL Collection views
"""
import logging
import re
import requests
import simplejson as json
from apps.core.utils import SearchStringFormatter
from apps.core.views import UserOrOrgMixin
from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import (HttpResponseRedirect, Http404)
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView, View
from django.views.generic.edit import FormView
from libs.ocl import OclApi, OclSearch, OclConstants
from .forms import (CollectionCreateForm, CollectionEditForm,
CollectionDeleteForm, CollectionVersionAddForm, CollectionVersionsEditForm)
logger = logging.getLogger('oclweb')
class CollectionsBaseView(UserOrOrgMixin):
def get_args(self):
super(CollectionsBaseView, self).get_args()
self.collection_id = self.kwargs.get('collection')
self.collection_version_id = self.kwargs.get('collection_version')
def get_collection_data(self, owner_type, owner_id, collection_id, field_name,
collection_version_id=None, search_params=None):
searcher = OclSearch(search_type=field_name,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=True)
if collection_version_id:
search_response = api.get(
owner_type, owner_id, 'collections', collection_id,
collection_version_id, field_name,
params=searcher.search_params)
else:
search_response = api.get(
owner_type, owner_id, 'collections', collection_id, field_name,
params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=search_params)
return searcher
def get_collection_versions(self, owner_type, owner_id, collection_id, search_params=None):
# Perform the search
searcher = OclSearch(search_type=OclConstants.RESOURCE_NAME_COLLECTION_VERSIONS,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=False)
search_response = api.get(owner_type, owner_id, 'collections', collection_id, 'versions',
params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=search_params)
return searcher
class CollectionReferencesView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_references.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionReferencesView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, 'references',
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'References'
context['collection'] = collection
context['references'] = searcher.search_results
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = searcher.get_query()
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = self.request.GET.get('search_sort', 'ASC')
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
class CollectionMappingsView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_mappings.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionMappingsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# to fetch all , set limit to 0
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, OclConstants.RESOURCE_NAME_MAPPINGS,
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'Mappings'
context['collection'] = collection
context['collection_version'] = self.collection_version_id
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = searcher.get_query()
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
def get(self, request, *args, **kwargs):
if request.is_ajax():
self.get_args()
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id,
OclConstants.RESOURCE_NAME_MAPPINGS,
collection_version_id=self.collection_version_id,
search_params=self.request.GET
)
response = {
'items': searcher.search_results,
'per_page': searcher.num_per_page,
'total': searcher.num_found,
}
return HttpResponse(
json.dumps(response),
content_type="application/json"
)
return super(CollectionMappingsView, self).get(self, *args, **kwargs)
class CollectionConceptsView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_concepts.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionConceptsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
# to fetch all , set limit to 0
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, OclConstants.RESOURCE_NAME_CONCEPTS,
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'Concepts'
context['collection'] = collection
context['collection_version'] = self.collection_version_id
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = self.search_string if hasattr(self, 'search_string') else ''
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
def get(self, request, *args, **kwargs):
self.search_string = request.GET.get('q', '')
SearchStringFormatter.add_wildcard(request)
if request.is_ajax():
self.get_args()
# Load the concepts in this collection, applying search parameters
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id,
OclConstants.RESOURCE_NAME_CONCEPTS,
collection_version_id=self.collection_version_id,
search_params=self.request.GET
)
response = {
'items': searcher.search_results,
'per_page': searcher.num_per_page,
'total': searcher.num_found,
}
return HttpResponse(
json.dumps(response),
content_type="application/json"
)
return super(CollectionConceptsView, self).get(self, *args, **kwargs)
class CollectionVersionsView(CollectionsBaseView, TemplateView):
""" collection About view. """
template_name = "collections/collection_versions.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionVersionsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Load the collection versions
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
searcher = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
for collection_version in searcher.search_results:
if '_ocl_processing' in collection_version and collection_version['_ocl_processing']:
collection_version['is_processing'] = 'True'
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['selected_tab'] = 'Versions'
context['collection'] = collection
context['collection_versions'] = searcher.search_results
return context
def get(self, request, *args, **kwargs):
self.get_args()
if request.is_ajax():
api = OclApi(self.request, debug=True)
result = api.get(self.owner_type, self.owner_id, 'collections',
kwargs.get('collection'), 'versions', params={'limit': '0'})
return HttpResponse(json.dumps(result.json()), content_type="application/json")
return super(CollectionVersionsView, self).get(self, *args, **kwargs)
class CollectionAboutView(CollectionsBaseView, TemplateView):
""" Collection About view. """
template_name = "collections/collection_about.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionAboutView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
about = None
if ('extras' in collection and isinstance(collection['extras'], dict) and
'about' in collection['extras']):
about = collection['extras'].get('about')
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'About'
context['collection'] = collection
context['about'] = about
return context
class CollectionDetailView(CollectionsBaseView, TemplateView):
""" Collection detail views """
template_name = "collections/collection_details.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionDetailView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
if results.status_code != 200:
if results.status_code == 404:
raise Http404
else:
results.raise_for_status()
collection = results.json()
context['kwargs'] = self.kwargs
context['collection'] = collection
context['selected_tab'] = 'Details'
return context
class CollectionCreateView(CollectionsBaseView, FormView):
"""
Create new Collection, either for an org or a user.
"""
form_class = CollectionCreateForm
template_name = "collections/collection_create.html"
def get_initial(self):
""" Load some useful data, not really for form display but internal use """
self.get_args()
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'request': self.request,
}
return data
def get_context_data(self, *args, **kwargs):
context = super(CollectionCreateView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
org = ocl_user = None
if self.from_org:
org = api.get('orgs', self.org_id).json()
else:
ocl_user = api.get('users', self.user_id).json()
# Set the context
context['org'] = org
context['ocl_user'] = ocl_user
context['from_user'] = self.from_user
context['from_org'] = self.from_org
return context
def form_valid(self, form):
"""
collection input is good, update API backend.
"""
self.get_args()
data = form.cleaned_data
short_code = data.pop('short_code')
data['id'] = short_code
if re.compile('^[a-zA-Z0-9\-]+$').match(short_code):
api = OclApi(self.request, debug=True)
result = api.post(self.owner_type, self.owner_id, 'collections', **data)
if not result.status_code == requests.codes.created:
emsg = result.json().get('detail', None)
if not emsg:
for msg in result.json().get('__all__'):
messages.add_message(self.request, messages.ERROR, msg)
else:
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
messages.add_message(self.request, messages.INFO, _('Collection created'))
if self.from_org:
return HttpResponseRedirect(reverse("collection-home",
kwargs={"org": self.org_id,
'collection': short_code}))
else:
return HttpResponseRedirect(reverse("collection-home",
kwargs={"user": self.user_id,
'collection': short_code}))
else:
            validator_template = "Short Code '%s' is not valid. Allowed characters are: letters (a-z, A-Z), numbers (0-9) and hyphens (-)."
messages.add_message(self.request, messages.ERROR, validator_template % short_code)
return HttpResponseRedirect(self.request.path)
class CollectionAddReferenceView(CollectionsBaseView, TemplateView):
template_name = "collections/collection_add_reference.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionAddReferenceView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['collection'] = collection
return context
def get_success_url(self):
""" Return URL for redirecting browser """
if self.from_org:
return reverse('collection-references',
kwargs={'org': self.org_id, 'collection':self.collection_id})
else:
return reverse(
'collection-references',
kwargs={"user": self.request.user.username, 'collection':self.collection_id})
def post(self, request, *args, **kwargs):
self.get_args()
data = json.loads(request.body)
api = OclApi(self.request, debug=True)
result = api.put(
self.owner_type,
self.owner_id,
'collections',
self.collection_id,
'references',
data=data
)
errors = result.json() if result.status_code == requests.codes.bad else []
return HttpResponse(
json.dumps({
'success_url': self.get_success_url(),
'errors': errors
}),
content_type="application/json"
)
class CollectionReferencesDeleteView(CollectionsBaseView, TemplateView):
def delete(self, request, *args, **kwargs):
self.get_args()
references = request.GET.get('references').split(',')
api = OclApi(self.request, debug=True)
data = {'references': references}
res = api.delete(self.owner_type, self.owner_id, 'collections',
self.collection_id, 'references', **data)
return HttpResponse(res.content, status=200)
class CollectionDeleteView(CollectionsBaseView, FormView):
"""
View for deleting Collection.
"""
template_name = "collections/collection_delete.html"
form_class = CollectionDeleteForm
def get_context_data(self, *args, **kwargs):
context = super(CollectionDeleteView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['collection'] = collection
return context
def get_success_url(self):
""" Return URL for redirecting browser """
if self.collection_version_id:
if self.from_org:
return reverse('collection-details',
kwargs={'org': self.org_id,
'collection': self.collection_id})
else:
return reverse('collection-details',
kwargs={'user': self.user_id,
'collection': self.collection_id})
else:
if self.from_org:
return reverse('org-collections',
kwargs={'org': self.org_id})
else:
return reverse('users:detail',
kwargs={"username": self.request.user.username})
def form_valid(self, form, *args, **kwargs):
""" Use validated form data to delete the collection"""
self.get_args()
api = OclApi(self.request, debug=True)
if self.collection_version_id:
result = api.delete(self.owner_type, self.owner_id, 'collections',
self.collection_id, self.collection_version_id, **kwargs)
else:
result = api.delete(
self.owner_type, self.owner_id, 'collections', self.collection_id, **kwargs)
if result.status_code != 204:
emsg = result.json().get('detail', 'Error')
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
else:
messages.add_message(self.request, messages.INFO, _('Collection Deleted'))
return HttpResponseRedirect(self.get_success_url())
class CollectionEditView(CollectionsBaseView, FormView):
""" Edit collection, either for an org or a user. """
template_name = "collections/collection_edit.html"
def get_form_class(self):
""" Trick to load initial data """
self.get_args()
api = OclApi(self.request, debug=True)
self.collection = api.get(self.owner_type, self.owner_id, 'collections',
self.collection_id).json()
return CollectionEditForm
def get_initial(self):
""" Load some useful data, not really for form display but internal use """
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'request': self.request,
}
data.update(self.collection)
# convert supported locales to string
supported_locale_list = self.collection.get('supported_locales')
if supported_locale_list is None:
data['supported_locales'] = ''
else:
data['supported_locales'] = ','.join(supported_locale_list)
return data
def get_context_data(self, *args, **kwargs):
""" Get collection details for the edit form """
context = super(CollectionEditView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
org = ocl_user = None
if self.from_org:
org = api.get('orgs', self.org_id).json()
else:
ocl_user = api.get('users', self.user_id).json()
# Set the context
context['kwargs'] = self.kwargs
context['org'] = org
context['ocl_user'] = ocl_user
context['from_user'] = self.from_user
context['from_org'] = self.from_org
context['collection'] = self.collection
return context
def form_valid(self, form):
""" If Collection input is valid, then update API backend. """
self.get_args()
# Submit updated collection data to the API
data = form.cleaned_data
api = OclApi(self.request, debug=True)
result = api.update_collection(self.owner_type, self.owner_id, self.collection_id, data)
messages.add_message(self.request, messages.INFO, _('Collection updated'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-details',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-details',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
class CollectionVersionsNewView(CollectionsBaseView, UserOrOrgMixin, FormView):
form_class = CollectionVersionAddForm
template_name = "collections/collection_versions_new.html"
def get_initial(self):
super(CollectionVersionsNewView, self).get_initial()
self.get_args()
api = OclApi(self.request, debug=True)
# collection_version = None
if self.from_org:
collection_version = api.get('orgs', self.org_id, 'collections', self.collection_id,
'versions', params={'limit': 1}).json()
else:
collection_version = api.get('users', self.user_id, 'collections', self.collection_id,
'versions', params={'limit': 1}).json()
data = {
'request': self.request,
'from_user': self.from_user,
'from_org': self.from_org,
'user_id': self.user_id,
'org_id': self.org_id,
'owner_type': self.owner_type,
'owner_id': self.owner_id,
'collection_id': self.collection_id,
'previous_version': collection_version[0]['id'],
'released': False
}
return data
def get_context_data(self, *args, **kwargs):
context = super(CollectionVersionsNewView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
# collection = None
if self.from_org:
collection = api.get('orgs', self.org_id, 'collections', self.collection_id).json()
else:
collection = api.get('users', self.user_id, 'collections', self.collection_id).json()
# Set the context
context['kwargs'] = self.kwargs
context['collection'] = collection
return context
def form_valid(self, form):
self.get_args()
# Submit the new collection version
data = form.cleaned_data
api = OclApi(self.request, debug=True)
result = api.create_collection_version(self.owner_type, self.owner_id,
self.collection_id, data)
if result.status_code == requests.codes.created:
messages.add_message(self.request, messages.INFO, _('Collection version created!'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
else:
error_msg = result.json().get('detail', 'Error')
messages.add_message(self.request, messages.ERROR, error_msg)
return HttpResponseRedirect(self.request.path)
class CollectionVersionEditView(LoginRequiredMixin, UserOrOrgMixin, FormView):
""" View to edit collection version """
form_class = CollectionVersionsEditForm
template_name = "collections/collection_versions_edit.html"
def get_form_class(self):
""" Trick to load initial form data """
self.get_args()
api = OclApi(self.request, debug=True)
self.collection_version = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id,
self.collection_version_id).json()
return CollectionVersionsEditForm
def get_initial(self):
""" Load initial form data """
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'collection_id': self.collection_id,
'collection_version_id': self.collection_version_id,
'request': self.request,
}
data.update(self.collection_version)
return data
def get_context_data(self, *args, **kwargs):
""" Load context data needed for the view """
context = super(CollectionVersionEditView, self).get_context_data(*args, **kwargs)
context['kwargs'] = self.kwargs
context['collection_version'] = self.collection_version
return context
def form_valid(self, form):
""" If form data is valid, then update API backend. """
self.get_args()
# Submit updated collection version description to the API
data = {
'description':form.cleaned_data.get('description')
}
api = OclApi(self.request, debug=True)
result = api.update_resource_version(self.owner_type, self.owner_id, self.collection_id,
self.collection_version_id, 'collections', data)
# Check if successful
if result.status_code == requests.codes.ok:
messages.add_message(self.request, messages.INFO, _('Collection version updated'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
else:
emsg = result.text
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
class CollectionVersionEditJsonView(CollectionsBaseView, TemplateView):
def put(self, request, *args, **kwargs):
self.get_args()
api = OclApi(self.request, debug=True)
data = json.loads(request.body)
res = api.update_resource_version(self.owner_type,
self.owner_id,
self.collection_id,
self.collection_version_id,
'collections',
data)
return HttpResponse(res.content, status=200)
class CollectionVersionDeleteView(CollectionsBaseView, View):
""" collection version delete view"""
def delete(self, request, *args, **kwargs):
self.get_args()
api = OclApi(self.request, debug=True)
if request.is_ajax():
result = api.delete(
self.owner_type,
self.owner_id,
'collections',
self.collection_id,
self.collection_version_id,
**kwargs
)
return HttpResponse(
json.dumps({}),
content_type="application/json"
)
return super(CollectionVersionDeleteView, self).delete(self, *args, **kwargs)
| mpl-2.0 |
SoCo/SoCo | soco/events_asyncio.py | 1 | 20247 | """Classes to handle Sonos UPnP Events and Subscriptions using asyncio.
The `Subscription` class from this module will be used in
:py:mod:`soco.services` if `config.EVENTS_MODULE` is set
to point to this module.
Example:
Run this code, and change your volume, tracks etc::
import logging
logging.basicConfig()
import soco
import asyncio
from pprint import pprint
from soco import events_asyncio
soco.config.EVENTS_MODULE = events_asyncio
def print_event(event):
try:
pprint(event.variables)
except Exception as e:
print("There was an error in print_event:", e)
def _get_device():
device = soco.discover().pop().group.coordinator
print(device.player_name)
return device
async def main():
# pick a device at random and use it to get
# the group coordinator
loop = asyncio.get_event_loop()
device = await loop.run_in_executor(None, _get_device)
sub = await device.renderingControl.subscribe()
sub2 = await device.avTransport.subscribe()
sub.callback = print_event
sub2.callback = print_event
async def before_shutdown():
await sub.unsubscribe()
await sub2.unsubscribe()
await events_asyncio.event_listener.async_stop()
await asyncio.sleep(1)
print("Renewing subscription..")
await sub.renew()
await asyncio.sleep(100)
await before_shutdown()
if __name__ == "__main__":
asyncio.run(main())
"""
import logging
import socket
import sys
import time
import asyncio
try:
from aiohttp import ClientSession, web
except ImportError as error:
print(
"""ImportError: {}:
Use of the SoCo events_asyncio module requires the 'aiohttp'
package and its dependencies to be installed. aiohttp is not
installed with SoCo by default due to potential issues installing
the dependencies 'mutlidict' and 'yarl' on some platforms.
See: https://github.com/SoCo/SoCo/issues/819""".format(
error
)
)
sys.exit(1)
# Event is imported for compatibility with events.py
# pylint: disable=unused-import
from .events_base import Event # noqa: F401
from .events_base import ( # noqa: E402
get_listen_ip,
parse_event_xml,
EventNotifyHandlerBase,
EventListenerBase,
SubscriptionBase,
SubscriptionsMap,
)
from .exceptions import SoCoException # noqa: E402
log = logging.getLogger(__name__) # pylint: disable=C0103
class EventNotifyHandler(EventNotifyHandlerBase):
"""Handles HTTP ``NOTIFY`` Verbs sent to the listener server.
Inherits from `soco.events_base.EventNotifyHandlerBase`.
"""
def __init__(self):
super().__init__()
# The SubscriptionsMapAio instance created when this module is
# imported. This is referenced by
# soco.events_base.EventNotifyHandlerBase.
self.subscriptions_map = subscriptions_map
async def notify(self, request):
"""Serve a ``NOTIFY`` request by calling `handle_notification`
with the headers and content.
"""
content = await request.text()
seq = request.headers["seq"] # Event sequence number
sid = request.headers["sid"] # Event Subscription Identifier
# find the relevant service from the sid
# pylint: disable=no-member
subscription = self.subscriptions_map.get_subscription(sid)
# It might have been removed by another thread
if subscription:
timestamp = time.time()
service = subscription.service
self.log_event(seq, service.service_id, timestamp)
log.debug("Event content: %s", content)
if "x-sonos-http" in content:
# parse_event_xml will generate I/O if
# x-sonos-http is in the content
variables = await asyncio.get_event_loop().run_in_executor(
None, parse_event_xml, content
)
else:
variables = parse_event_xml(content)
# Build the Event object
event = Event(sid, seq, service, timestamp, variables)
# pass the event details on to the service so it can update
# its cache.
# pylint: disable=protected-access
service._update_cache_on_event(event)
# Pass the event on for handling
subscription.send_event(event)
else:
log.debug("No service registered for %s", sid)
return web.Response(text="OK", status=200)
# pylint: disable=no-self-use, missing-docstring
def log_event(self, seq, service_id, timestamp):
log.debug("Event %s received for %s service at %s", seq, service_id, timestamp)
class EventListener(EventListenerBase): # pylint: disable=too-many-instance-attributes
"""The Event Listener.
Runs an http server which is an endpoint for ``NOTIFY``
requests from Sonos devices. Inherits from
`soco.events_base.EventListenerBase`.
"""
def __init__(self):
super().__init__()
self.sock = None
self.ip_address = None
self.port = None
self.runner = None
self.site = None
self.session = None
self.start_lock = None
def start(self, any_zone):
"""A stub since the first subscribe calls async_start."""
return
def listen(self, ip_address):
"""A stub since since async_listen is used."""
return
async def async_start(self, any_zone):
"""Start the event listener listening on the local machine under the lock.
Args:
any_zone (SoCo): Any Sonos device on the network. It does not
matter which device. It is used only to find a local IP
address reachable by the Sonos net.
"""
if not self.start_lock:
self.start_lock = asyncio.Lock()
async with self.start_lock:
if self.is_running:
return
# Use configured IP address if there is one, else detect
# automatically.
ip_address = get_listen_ip(any_zone.ip_address)
if not ip_address:
log.exception("Could not start Event Listener: check network.")
# Otherwise, no point trying to start server
return
port = await self.async_listen(ip_address)
if not port:
return
self.address = (ip_address, port)
self.session = ClientSession(raise_for_status=True)
self.is_running = True
log.debug("Event Listener started")
async def async_listen(self, ip_address):
"""Start the event listener listening on the local machine at
port 1400 (default). If this port is unavailable, the
listener will attempt to listen on the next available port,
within a range of 100.
Make sure that your firewall allows connections to this port.
This method is called by `soco.events_base.EventListenerBase.start`
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
Args:
ip_address (str): The local network interface on which the server
should start listening.
Returns:
int: The port on which the server is listening.
Note:
The port on which the event listener listens is configurable.
See `config.EVENT_LISTENER_PORT`
"""
for port_number in range(
self.requested_port_number, self.requested_port_number + 100
):
try:
if port_number > self.requested_port_number:
log.debug("Trying next port (%d)", port_number)
# pylint: disable=no-member
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((ip_address, port_number))
sock.listen(200)
self.sock = sock
self.port = port_number
break
# pylint: disable=invalid-name
except socket.error as e:
log.warning("Could not bind to %s:%s: %s", ip_address, port_number, e)
continue
if not self.port:
return None
self.ip_address = ip_address
await self._async_start()
return self.port
async def _async_start(self):
"""Start the site."""
handler = EventNotifyHandler()
app = web.Application()
app.add_routes([web.route("notify", "", handler.notify)])
self.runner = web.AppRunner(app)
await self.runner.setup()
self.site = web.SockSite(self.runner, self.sock)
await self.site.start()
log.debug("Event listener running on %s", (self.ip_address, self.port))
async def async_stop(self):
"""Stop the listener."""
if self.site:
await self.site.stop()
self.site = None
if self.runner:
await self.runner.cleanup()
self.runner = None
if self.session:
await self.session.close()
self.session = None
if self.sock:
self.sock.close()
self.sock = None
self.port = None
self.ip_address = None
# pylint: disable=unused-argument
def stop_listening(self, address):
"""Stop the listener."""
asyncio.ensure_future(self.async_stop())
class Subscription(SubscriptionBase):
"""A class representing the subscription to a UPnP event.
Inherits from `soco.events_base.SubscriptionBase`.
"""
def __init__(self, service, callback=None):
"""
Args:
service (Service): The SoCo `Service` to which the subscription
should be made.
event_queue (:class:`~queue.Queue`): A queue on which received
events will be put. If not specified, a queue will be
created and used.
"""
super().__init__(service, None)
#: :py:obj:`function`: callback function to be called whenever an
#: `Event` is received. If it is set and is callable, the callback
#: function will be called with the `Event` as the only parameter and
#: the Subscription's event queue won't be used.
self.callback = callback
# The SubscriptionsMapAio instance created when this module is
# imported. This is referenced by soco.events_base.SubscriptionBase.
self.subscriptions_map = subscriptions_map
# The EventListener instance created when this module is imported.
# This is referenced by soco.events_base.SubscriptionBase.
self.event_listener = event_listener
# Used to keep track of the auto_renew loop
self._auto_renew_task = None
# pylint: disable=arguments-differ
def subscribe(self, requested_timeout=None, auto_renew=False, strict=True):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
This method calls `events_base.SubscriptionBase.subscribe`.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
self.subscriptions_map.subscribing()
future = asyncio.Future()
subscribe = super().subscribe
async def _async_wrap_subscribe():
try:
if not self.event_listener.is_running:
await self.event_listener.async_start(self.service.soco)
await subscribe(requested_timeout, auto_renew)
future.set_result(self)
except SoCoException as ex:
future.set_exception(ex)
except Exception as exc: # pylint: disable=broad-except
self._cancel_subscription(exc)
if strict:
future.set_exception(exc)
else:
self._log_exception(exc)
future.set_result(self)
finally:
self.subscriptions_map.finished_subscribing()
asyncio.ensure_future(_async_wrap_subscribe())
return future
def _log_exception(self, exc):
"""Log an exception during subscription."""
msg = (
"An Exception occurred: {}.".format(exc)
+ " Subscription to {},".format(
self.service.base_url + self.service.event_subscription_url
)
+ " sid: {} has been cancelled".format(self.sid)
)
log.exception(msg)
async def renew(
self, requested_timeout=None, is_autorenew=False, strict=True
): # pylint: disable=invalid-overridden-method
"""renew(requested_timeout=None)
Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
This method calls `events_base.SubscriptionBase.renew`.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
is_autorenew (bool, optional): Whether this is an autorenewal.
Default `False`.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
try:
return await super().renew(requested_timeout, is_autorenew)
except Exception as exc: # pylint: disable=broad-except
self._cancel_subscription(exc)
if self.auto_renew_fail is not None and hasattr(
self.auto_renew_fail, "__call__"
):
# pylint: disable=not-callable
self.auto_renew_fail(exc)
else:
self._log_exception(exc)
if strict:
raise
return self
async def unsubscribe(
self, strict=True
): # pylint: disable=invalid-overridden-method
"""unsubscribe()
Unsubscribe from the service's events.
Once unsubscribed, a Subscription instance should not be reused
This method calls `events_base.SubscriptionBase.unsubscribe`.
Args:
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
try:
unsub = super().unsubscribe()
if unsub is None:
return
await unsub
except Exception as exc: # pylint: disable=broad-except
if strict:
raise
self._log_exception(exc)
return self
def _auto_renew_start(self, interval):
"""Starts the auto_renew loop."""
self._auto_renew_task = asyncio.get_event_loop().call_later(
interval, self._auto_renew_run, interval
)
def _auto_renew_run(self, interval):
asyncio.ensure_future(self.renew(is_autorenew=True, strict=False))
self._auto_renew_start(interval)
def _auto_renew_cancel(self):
"""Cancels the auto_renew loop"""
if self._auto_renew_task:
self._auto_renew_task.cancel()
self._auto_renew_task = None
# pylint: disable=no-self-use, too-many-branches, too-many-arguments
def _request(self, method, url, headers, success, unconditional=None):
"""Sends an HTTP request.
Args:
method (str): 'SUBSCRIBE' or 'UNSUBSCRIBE'.
url (str): The full endpoint to which the request is being sent.
headers (dict): A dict of headers, each key and each value being
of type `str`.
success (function): A function to be called if the
request succeeds. The function will be called with a dict
of response headers as its only parameter.
unconditional (function): An optional function to be called after
the request is complete, regardless of its success. Takes
no parameters.
"""
async def _async_make_request():
response = await self.event_listener.session.request(
method, url, headers=headers
)
if response.ok:
success(response.headers)
if unconditional:
unconditional()
return _async_make_request()
class nullcontext: # pylint: disable=invalid-name
"""Context manager that does no additional processing.
Backport from python 3.7+ for older pythons.
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
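# Hedged illustration (not part of the upstream module): nullcontext simply
# hands back whatever it was constructed with, so it can stand in for a real
# lock or other context manager when no extra processing is needed.
#
#     with nullcontext() as res:
#         assert res is None
#     with nullcontext(enter_result=42) as res:
#         assert res == 42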
class SubscriptionsMapAio(SubscriptionsMap):
"""Maintains a mapping of sids to `soco.events_asyncio.Subscription`
instances. Registers each subscription to be unsubscribed at exit.
Inherits from `soco.events_base.SubscriptionsMap`.
"""
def __init__(self):
super().__init__()
# A counter of calls to Subscription.subscribe
# that have started but not completed. This is
# to prevent the event listener from being stopped prematurely
self._pending = 0
self.subscriptions_lock = nullcontext()
def register(self, subscription):
"""Register a subscription by updating local mapping of sid to
subscription and registering it to be unsubscribed at exit.
Args:
subscription(`soco.events_asyncio.Subscription`): the subscription
to be registered.
"""
# Add the subscription to the local dict of subscriptions so it
# can be looked up by sid
self.subscriptions[subscription.sid] = subscription
def subscribing(self):
"""Called when the `Subscription.subscribe` method
commences execution.
"""
# Increment the counter
self._pending += 1
def finished_subscribing(self):
"""Called when the `Subscription.subscribe` method
completes execution.
"""
# Decrement the counter
self._pending -= 1
@property
def count(self):
"""
`int`: The number of active or pending subscriptions.
"""
return len(self.subscriptions) + self._pending
subscriptions_map = SubscriptionsMapAio() # pylint: disable=C0103
event_listener = EventListener() # pylint: disable=C0103
| mit |
alshedivat/tensorflow | tensorflow/contrib/gan/python/eval/python/eval_utils_impl.py | 73 | 5394 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility file for visualizing generated images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
__all__ = [
"image_grid",
"image_reshaper",
]
# TODO(joelshor): Make this a special case of `image_reshaper`.
def image_grid(input_tensor, grid_shape, image_shape=(32, 32), num_channels=3):
"""Arrange a minibatch of images into a grid to form a single image.
Args:
input_tensor: Tensor. Minibatch of images to format, either 4D
([batch size, height, width, num_channels]) or flattened
([batch size, height * width * num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
image_shape: Sequence of int. The shape of a single image,
formatted as [image_height, image_width].
num_channels: int. The number of channels in an image.
Returns:
Tensor representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match, or the image
shape and number of channels are incompatible with the input tensor.
"""
if grid_shape[0] * grid_shape[1] != int(input_tensor.shape[0]):
raise ValueError("Grid shape %s incompatible with minibatch size %i." %
(grid_shape, int(input_tensor.shape[0])))
if len(input_tensor.shape) == 2:
num_features = image_shape[0] * image_shape[1] * num_channels
if int(input_tensor.shape[1]) != num_features:
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
elif len(input_tensor.shape) == 4:
if (int(input_tensor.shape[1]) != image_shape[0] or
int(input_tensor.shape[2]) != image_shape[1] or
int(input_tensor.shape[3]) != num_channels):
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
else:
raise ValueError("Unrecognized input tensor format.")
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
input_tensor = array_ops.reshape(
input_tensor, tuple(grid_shape) + tuple(image_shape) + (num_channels,))
input_tensor = array_ops.transpose(input_tensor, [0, 1, 3, 2, 4])
input_tensor = array_ops.reshape(
input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
input_tensor = array_ops.transpose(input_tensor, [0, 2, 1, 3])
input_tensor = array_ops.reshape(
input_tensor, [1, height, width, num_channels])
return input_tensor
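# Hedged usage sketch (the `batch` tensor below is hypothetical, not from this
# file): a minibatch of six 32x32 RGB images arranged as a 2x3 grid yields a
# single tensor of shape [1, 64, 96, 3].
#
#     grid = image_grid(batch, grid_shape=(2, 3),
#                       image_shape=(32, 32), num_channels=3)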
def _validate_images(images):
for img in images:
img.shape.assert_has_rank(3)
img.shape.assert_is_fully_defined()
if img.shape[-1] not in (1, 3):
raise ValueError("image_reshaper only supports 1 or 3 channel images.")
# TODO(joelshor): Move the dimension logic from Python to Tensorflow.
def image_reshaper(images, num_cols=None):
"""A reshaped summary image.
Returns an image that will contain all elements in the list and will be
laid out in a nearly-square tiling pattern (e.g. 11 images will lead to a
3x4 tiled image).
Args:
images: Image data to summarize. Can be an RGB or grayscale image, a list of
such images, or a set of RGB images concatenated along the depth
dimension. The shape of each image is assumed to be [batch_size,
height, width, depth].
num_cols: (Optional) If provided, this is the number of columns in the final
output image grid. Otherwise, the number of columns is determined by
the number of images.
Returns:
A summary image matching the input with automatic tiling if needed.
Output shape is [1, height, width, channels].
"""
if isinstance(images, ops.Tensor):
images = array_ops.unstack(images)
_validate_images(images)
num_images = len(images)
num_columns = (num_cols if num_cols else
int(math.ceil(math.sqrt(num_images))))
num_rows = int(math.ceil(float(num_images) / num_columns))
rows = [images[x:x+num_columns] for x in range(0, num_images, num_columns)]
# Add empty image tiles if the last row is incomplete.
num_short = num_rows * num_columns - num_images
assert num_short >= 0 and num_short < num_columns
if num_short > 0:
rows[-1].extend([array_ops.zeros_like(images[-1])] * num_short)
# Convert each row from a list of tensors to a single tensor.
rows = [array_ops.concat(row, 1) for row in rows]
# Stack rows vertically.
img = array_ops.concat(rows, 0)
return array_ops.expand_dims(img, 0)
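# Hedged usage sketch (`images_list` is a hypothetical list of [32, 32, 3]
# tensors): with eleven images the call below tiles them into a 3x4 grid,
# padding the last row with a zero image, and returns a [1, 96, 128, 3]
# tensor suitable for an image summary.
#
#     tiled = image_reshaper(images_list)
#     tiled_two_cols = image_reshaper(images_list, num_cols=2)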
| apache-2.0 |
rohitwaghchaure/New_Theme_Erp | erpnext/accounts/report/gross_profit/gross_profit.py | 10 | 4692 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from erpnext.stock.utils import get_buying_amount, get_sales_bom_buying_amount
def execute(filters=None):
if not filters: filters = {}
stock_ledger_entries = get_stock_ledger_entries(filters)
source = get_source_data(filters)
item_sales_bom = get_item_sales_bom()
columns = ["Delivery Note/Sales Invoice::120", "Link::30", "Posting Date:Date", "Posting Time",
"Item Code:Link/Item", "Item Name", "Description", "Warehouse:Link/Warehouse",
"Qty:Float", "Selling Rate:Currency", "Avg. Buying Rate:Currency",
"Selling Amount:Currency", "Buying Amount:Currency",
"Gross Profit:Currency", "Gross Profit %:Percent", "Project:Link/Project"]
data = []
for row in source:
selling_amount = flt(row.base_amount)
item_sales_bom_map = item_sales_bom.get(row.parenttype, {}).get(row.name, frappe._dict())
if item_sales_bom_map.get(row.item_code):
buying_amount = get_sales_bom_buying_amount(row.item_code, row.warehouse,
row.parenttype, row.name, row.item_row, stock_ledger_entries, item_sales_bom_map)
else:
buying_amount = get_buying_amount(row.parenttype, row.name, row.item_row,
stock_ledger_entries.get((row.item_code, row.warehouse), []))
buying_amount = buying_amount > 0 and buying_amount or 0
gross_profit = selling_amount - buying_amount
if selling_amount:
gross_profit_percent = (gross_profit / selling_amount) * 100.0
else:
gross_profit_percent = 0.0
icon = """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
% ("/".join(["#Form", row.parenttype, row.name]),)
data.append([row.name, icon, row.posting_date, row.posting_time, row.item_code, row.item_name,
row.description, row.warehouse, row.qty, row.base_rate,
row.qty and (buying_amount / row.qty) or 0, row.base_amount, buying_amount,
gross_profit, gross_profit_percent, row.project])
return columns, data
def get_stock_ledger_entries(filters):
query = """select item_code, voucher_type, voucher_no,
voucher_detail_no, posting_date, posting_time, stock_value,
warehouse, actual_qty as qty
from `tabStock Ledger Entry`"""
if filters.get("company"):
query += """ where company=%(company)s"""
query += " order by item_code desc, warehouse desc, posting_date desc, posting_time desc, name desc"
res = frappe.db.sql(query, filters, as_dict=True)
out = {}
for r in res:
if (r.item_code, r.warehouse) not in out:
out[(r.item_code, r.warehouse)] = []
out[(r.item_code, r.warehouse)].append(r)
return out
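# Illustrative note (field values below are hypothetical, not from the
# original report): the returned mapping is keyed by (item_code, warehouse)
# and each value is a list of ledger rows in reverse chronological order, e.g.
#
# out[("ITEM-001", "Stores - WP")] == [
# {"voucher_type": "Delivery Note", "voucher_no": "DN-0002", "qty": -5.0, ...},
# {"voucher_type": "Purchase Receipt", "voucher_no": "PR-0001", "qty": 10.0, ...},
# ]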
def get_item_sales_bom():
item_sales_bom = {}
for d in frappe.db.sql("""select parenttype, parent, parent_item,
item_code, warehouse, -1*qty as total_qty, parent_detail_docname
from `tabPacked Item` where docstatus=1""", as_dict=True):
item_sales_bom.setdefault(d.parenttype, frappe._dict()).setdefault(d.parent,
frappe._dict()).setdefault(d.parent_item, []).append(d)
return item_sales_bom
def get_source_data(filters):
conditions = ""
if filters.get("company"):
conditions += " and company=%(company)s"
if filters.get("from_date"):
conditions += " and posting_date>=%(from_date)s"
if filters.get("to_date"):
conditions += " and posting_date<=%(to_date)s"
delivery_note_items = frappe.db.sql("""select item.parenttype, dn.name,
dn.posting_date, dn.posting_time, dn.project_name,
item.item_code, item.item_name, item.description, item.warehouse,
item.qty, item.base_rate, item.base_amount, item.name as "item_row",
timestamp(dn.posting_date, dn.posting_time) as posting_datetime
from `tabDelivery Note` dn, `tabDelivery Note Item` item
where item.parent = dn.name and dn.docstatus = 1 %s
order by dn.posting_date desc, dn.posting_time desc""" % (conditions,), filters, as_dict=1)
sales_invoice_items = frappe.db.sql("""select item.parenttype, si.name,
si.posting_date, si.posting_time, si.project_name,
item.item_code, item.item_name, item.description, item.warehouse,
item.qty, item.base_rate, item.base_amount, item.name as "item_row",
timestamp(si.posting_date, si.posting_time) as posting_datetime
from `tabSales Invoice` si, `tabSales Invoice Item` item
where item.parent = si.name and si.docstatus = 1 %s
and si.update_stock = 1
order by si.posting_date desc, si.posting_time desc""" % (conditions,), filters, as_dict=1)
source = delivery_note_items + sales_invoice_items
if len(source) > len(delivery_note_items):
source.sort(key=lambda d: d.posting_datetime, reverse=True)
return source | agpl-3.0 |
thnee/ansible | lib/ansible/module_utils/splitter.py | 197 | 9433 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
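# Hedged examples (not in the original module): the return value is the
# currently open quote character, or None once all quotes are balanced.
#
# _get_quote_state('a="foo', None) -> '"' (a double quote is still open)
# _get_quote_state('bar"', '"') -> None (the open quote was closed)
# _get_quote_state(r'c=\"x', None) -> None (escaped quotes are ignored)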
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
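# Hedged examples (not in the original module):
#
# _count_jinja2_blocks("{{ foo", 0, "{{", "}}") -> 1 (one block opened)
# _count_jinja2_blocks("bar }}", 1, "{{", "}}") -> 0 (that block closed)
# _count_jinja2_blocks("}}", 0, "{{", "}}") -> 0 (depth never drops below zero)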
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks; however, this function is (or will be) used in the
core portions as well, before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation of shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# here we encode the args, so we have a uniform charset to
# work with, and split on white space
args = args.strip()
try:
args = args.encode('utf-8')
do_decode = True
except UnicodeDecodeError:
do_decode = False
items = args.split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx, item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx, token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and not inside_quotes and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
spacer = ''
if not params[-1].endswith('\n') and idx == 0:
spacer = '\n'
params[-1] = "%s%s%s" % (params[-1], spacer, token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
if not params[-1].endswith('\n') or item == '':
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
# finally, we decode each param back to the unicode it was in the arg string
if do_decode:
params = [x.decode('utf-8') for x in params]
return params
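# Hedged behaviour sketch (inputs are hypothetical and assume the Python 2
# string semantics this module was written for):
#
# split_args('a=b c="foo bar"') -> ['a=b', 'c="foo bar"']
# split_args('msg={{ foo }} state=present') -> ['msg={{ foo }}', 'state=present']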
def is_quoted(data):
return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
| gpl-3.0 |
mcaleavya/bcc | examples/networking/vlan_filter/data-plane-tracing.py | 4 | 7701 | #!/usr/bin/python
from __future__ import print_function
from bcc import BPF
import sys
import socket
import os
import argparse
import time
import netifaces as ni
from sys import argv
from kafka import KafkaProducer
from kafka.errors import KafkaError
from datetime import datetime
#args
def usage():
print("USAGE: %s [-i <if_name>]" % argv[0])
print("")
print("Try '%s -h' for more options." % argv[0])
exit()
#help
def help():
print("USAGE: %s [-i <if_name>][-k <kafka_server_name:kafka_port>]" % argv[0])
print("")
print("optional arguments:")
print(" -h print this help")
print(" -i if_name select interface if_name. Default is eth0")
print(" -k kafka_server_name select kafka server name. Default is save to file")
print(" If -k option is not specified data will be saved to file.")
print("")
print("examples:")
print(" data-plane-tracing # bind socket to eth0")
print(" data-plane-tracing -i eno2 -k vc.manage.overcloud:9092 # bind socket to eno2 and send data to kafka server in iovisor-topic.")
exit()
#arguments
interface="eth0"
kafkaserver=''
#check provided arguments
if len(argv) == 2:
if str(argv[1]) == '-h':
help()
else:
usage()
if len(argv) == 3:
if str(argv[1]) == '-i':
interface = argv[2]
elif str(argv[1]) == '-k':
kafkaserver = argv[2]
else:
usage()
if len(argv) == 5:
if str(argv[1]) == '-i':
interface = argv[2]
kafkaserver = argv[4]
elif str(argv[1]) == '-k':
kafkaserver = argv[2]
interface = argv[4]
else:
usage()
if len(argv) > 5:
usage()
print ("binding socket to '%s'" % interface)
#initialize BPF - load source code from http-parse-simple.c
bpf = BPF(src_file = "data-plane-tracing.c", debug = 0)
#load eBPF program http_filter of type SOCKET_FILTER into the kernel eBPF vm
#more info about eBPF program types http://man7.org/linux/man-pages/man2/bpf.2.html
function_vlan_filter = bpf.load_func("vlan_filter", BPF.SOCKET_FILTER)
#create raw socket, bind it to eth0
#attach bpf program to socket created
BPF.attach_raw_socket(function_vlan_filter, interface)
#get file descriptor of the socket previously created inside BPF.attach_raw_socket
socket_fd = function_vlan_filter.sock
#create python socket object, from the file descriptor
sock = socket.fromfd(socket_fd,socket.PF_PACKET,socket.SOCK_RAW,socket.IPPROTO_IP)
#set it as blocking socket
sock.setblocking(True)
#get interface ip address. If the interface has no IPv4 address, fall back to 127.0.0.1.
try:
ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
except (KeyError, IndexError):
ip = '127.0.0.1'
print("| Timestamp | Host Name | Host IP | IP Version | Source Host IP | Dest Host IP | Source Host Port | Dest Host Port | VNI | Source VM MAC | Dest VM MAC | VLAN ID | Source VM IP | Dest VM IP | Protocol | Source VM Port | Dest VM Port | Packet Length |")
while 1:
#retrieve raw packet from socket
packet_str = os.read(socket_fd, 2048)
#convert packet into bytearray
packet_bytearray = bytearray(packet_str)
#ethernet header length
ETH_HLEN = 14
#VXLAN header length
VXLAN_HLEN = 8
#VLAN header length
VLAN_HLEN = 4
#Inner TCP/UDP header length
TCP_HLEN = 20
UDP_HLEN = 8
#calculate packet total length
total_length = packet_bytearray[ETH_HLEN + 2] #load MSB
total_length = total_length << 8 #shift MSB
total_length = total_length + packet_bytearray[ETH_HLEN+3] #add LSB
#calculate ip header length
ip_header_length = packet_bytearray[ETH_HLEN] #load Byte
ip_header_length = ip_header_length & 0x0F #mask bits 0..3
ip_header_length = ip_header_length << 2 #shift to obtain length
#calculate payload offset
payload_offset = ETH_HLEN + ip_header_length + UDP_HLEN + VXLAN_HLEN
#parsing ip version from ip packet header
ipversion = str(bin(packet_bytearray[14])[2:5])
#parsing source ip address, destination ip address from ip packet header
src_host_ip = str(packet_bytearray[26]) + "." + str(packet_bytearray[27]) + "." + str(packet_bytearray[28]) + "." + str(packet_bytearray[29])
dest_host_ip = str(packet_bytearray[30]) + "." + str(packet_bytearray[31]) + "." + str(packet_bytearray[32]) + "." + str(packet_bytearray[33])
#parsing source port and destination port
src_host_port = packet_bytearray[34] << 8 | packet_bytearray[35]
dest_host_port = packet_bytearray[36] << 8 | packet_bytearray[37]
#parsing VNI from VXLAN header
VNI = str((packet_bytearray[46] << 16) + (packet_bytearray[47] << 8) + packet_bytearray[48])
#parsing source mac address and destination mac address
mac_add = [packet_bytearray[50], packet_bytearray[51], packet_bytearray[52], packet_bytearray[53], packet_bytearray[54], packet_bytearray[55]]
src_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add))
mac_add = [packet_bytearray[56], packet_bytearray[57], packet_bytearray[58], packet_bytearray[59], packet_bytearray[60], packet_bytearray[61]]
dest_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add))
#parsing VLANID from VLAN header
VLANID=""
VLANID = str((packet_bytearray[64])+(packet_bytearray[65]))
#parsing source vm ip address, destination vm ip address from encapsulated ip packet header
src_vm_ip = str(packet_bytearray[80]) + "." + str(packet_bytearray[81]) + "." + str(packet_bytearray[82]) + "." + str(packet_bytearray[83])
dest_vm_ip = str(packet_bytearray[84]) + "." + str(packet_bytearray[85]) + "." + str(packet_bytearray[86]) + "." + str(packet_bytearray[87])
#parsing source port and destination port
if (packet_bytearray[77]==6 or packet_bytearray[77]==17):
src_vm_port = packet_bytearray[88] << 8 | packet_bytearray[89]
dest_vm_port = packet_bytearray[90] << 8 | packet_bytearray[91]
elif (packet_bytearray[77]==1):
src_vm_port = -1
dest_vm_port = -1
type = str(packet_bytearray[88])
else:
continue
timestamp = str(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f'))
#send data to remote server via Kafka Messaging Bus
if kafkaserver:
MESSAGE = (timestamp, socket.gethostname(),ip, str(int(ipversion, 2)), str(src_host_ip), str(dest_host_ip), str(src_host_port), str(dest_host_port), str(int(VNI)), str(src_vm_mac), str(dest_vm_mac), str(int(VLANID)), src_vm_ip, dest_vm_ip, str(packet_bytearray[77]), str(src_vm_port), str(dest_vm_port), str(total_length))
print (MESSAGE)
MESSAGE = ','.join(MESSAGE)
MESSAGE = MESSAGE.encode()
producer = KafkaProducer(bootstrap_servers=[kafkaserver])
producer.send('iovisor-topic', key=b'iovisor', value=MESSAGE)
#save data to files
else:
MESSAGE = timestamp+","+socket.gethostname()+","+ip+","+str(int(ipversion, 2))+","+src_host_ip+","+dest_host_ip+","+str(src_host_port)+","+str(dest_host_port)+","+str(int(VNI))+","+str(src_vm_mac)+","+str(dest_vm_mac)+","+str(int(VLANID))+","+src_vm_ip+","+dest_vm_ip+","+str(packet_bytearray[77])+","+str(src_vm_port)+","+str(dest_vm_port)+","+str(total_length)
print (MESSAGE)
#save data to a file on hour basis
filename = "./vlan-data-"+time.strftime("%Y-%m-%d-%H")+"-00"
with open(filename, "a") as f:
f.write("%s\n" % MESSAGE)
| apache-2.0 |
massmutual/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 54609 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM, BaseSVC
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "RandomTreesEmbedding", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
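# Hedged usage sketch (MyEstimator is a hypothetical class, not part of
# scikit-learn): check_estimator is typically invoked from a project's test
# suite to run the common checks against a custom estimator class.
#
#     from sklearn.utils.estimator_checks import check_estimator
#     check_estimator(MyEstimator)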
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, the default n_iter is likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
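# (the manual weights below reproduce the 'balanced' heuristic:
#  n_samples / (n_classes * per-class count))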
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
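# (e.g. an __init__ doing ``self.alpha = alpha * 2`` -- an illustrative,
# made-up case -- would make get_params() disagree with the constructor
# arguments and silently break clone() and grid search)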
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
spauka/therm_flask | scripts/BlueFors Scripts/TC_monitor.py | 1 | 7020 | import os, os.path
import sys
import re
import time, datetime
import csv, json
import urllib.request, urllib.error
PATH = "C:\\BlueFors logs"
FOLDER_PATTERN = r"([0-9]{2})-([0-9]{2})-([0-9]{2})"
FRIDGE = 'BlueFors_QT1'
SENSORS = ((1, "Fifty_K"),
(2, "Four_K"),
(3, "Magnet"),
(5, "Still"),
(6, "MC"),
(9, "Probe"))
# Finds the latest folder
def find_newest_folder():
newest = (0,0,0) # y, m, d
newest_folder = ""
# Look through all the folders in the path and find the newest
for filename in os.listdir(PATH):
match = re.findall(FOLDER_PATTERN, filename)
if not match:
continue
date = tuple(int(x) for x in match[0])
if date > newest:
newest_folder = filename
newest = date
return newest_folder
# Parse the file and return the next set of sensor values
# Select time from the last sensor read (Normally MC)
def parse_file(folder, channels, seek=None, oldest=datetime.datetime.min):
ch1 = channels[-1][0] # get the number of the last valid channel
path = os.path.join(PATH, folder, "CH%d T %s.log"%(ch1, folder))
try:
fhandle = open(path, 'rU')
except FileNotFoundError:
return (None, seek, True)
if seek:
fhandle.seek(seek[-1]) # Seek the first channel
else:
seek = [0]*len(channels) # Initialize list with [channels] zeros
while True:
line = fhandle.readline().strip() # Read the next line of the last channel file
iseof = (fhandle.tell() == os.fstat(fhandle.fileno()).st_size)
if not line:
return (None, seek, iseof)
data = line.split(',')
# Read out the next date
try:
date = datetime.datetime.strptime(data[0]+" "+data[1], "%d-%m-%y %H:%M:%S")
except (IndexError, ValueError):
# Couldn't extract time, skip line
return (None, seek, iseof)
if date < oldest:
continue
else:
# Read in all the previous sensors
data = {'Time': date}
for i, channel in enumerate(channels):
try:
s_path = os.path.join(PATH, folder, "CH%d T %s.log"%(channel[0], folder))
s_fhandle = open(s_path, 'rU')
s_fhandle.seek(seek[i])
line = s_fhandle.readline().strip(' \n\r\x00')
seek[i] = s_fhandle.tell()
line = line.split(",")
if line and len(line) == 3:
s_date = datetime.datetime.strptime(line[0]+" "+line[1], "%d-%m-%y %H:%M:%S")
temp = float(line[2])
# Check that the time is not too far in the past, if it is try to fast forward
s_eof = False
while date - s_date > datetime.timedelta(seconds=90):
line = s_fhandle.readline().strip()
seek[i] = s_fhandle.tell()
line = line.split(",")
if line and len(line) == 3:
s_date = datetime.datetime.strptime(line[0]+" "+line[1], "%d-%m-%y %H:%M:%S")
temp = float(line[2])
else:
# If we hit the end of the file and we are still in the past, move to the next sensor
s_eof = True
break
if s_eof:
# We hit the end of the file in the past. Move on to next sensor.
print("Skipping sensor: %s"%(channel[1]))
continue
# Check that this record is not more than 1.5 minutes out from the first one
if abs(s_date - date) > datetime.timedelta(seconds=90):
data[channels[i][1]] = float('NaN')
elif temp > 400000 or temp <= 0:
data[channels[i][1]] = float('NaN')
else:
data[channels[i][1]] = float(line[2])
else:
data[channels[i][1]] = float('NaN')
except FileNotFoundError:
data[channels[i][1]] = float('NaN')
return (data, seek, iseof)
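# Illustrative sketch (not part of the original script): how parse_file is
# typically driven, mirroring the polling loop in __main__ below. The example
# assumes the usual "DD-MM-YY,HH:MM:SS,<temperature>" log line format implied
# by the parsing above.
def _example_read_latest(oldest=datetime.datetime.min):
    folder = find_newest_folder()
    seek = None
    eof = False
    readings = []
    while not eof:
        data, seek, eof = parse_file(folder, SENSORS, seek=seek, oldest=oldest)
        if data:
            readings.append(data)
    return readings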
def find_oldest():
URL = 'https://qphys1114.research.ext.sydney.edu.au/therm_flask/%s/data/?current' % FRIDGE
try:
line = urllib.request.urlopen(URL).read().decode('utf-8')
except urllib.error.HTTPError:
return datetime.datetime.min
line = json.loads(line)
if "Time" not in line:
return datetime.datetime.min
else:
date = datetime.datetime.strptime(line["Time"], "%a %b %d %H:%M:%S %Y")
print(date)
return date
def post(data):
URL = 'https://qphys1114.research.ext.sydney.edu.au/therm_flask/%s/data/' % FRIDGE
data['Time'] = data['Time'].timestamp()
print("Updating at %r" % (data['Time'],))
while True:
try:
request = urllib.request.urlopen(URL, urllib.parse.urlencode(data).encode('utf-8'))
response = request.read().decode('utf-8')
request.close()
return response
except urllib.error.URLError:
print("URLOpen Error")
continue
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] in ('--help', '-h'):
print("Usage: %s [--parse-all]")
exit(0)
if sys.argv[1] == '--parse-all':
oldest = find_oldest()
print("Parsing all data from date %r" % oldest)
for filename in os.listdir(PATH):
if re.findall(FOLDER_PATTERN, filename):
print("Parsing file: %s" % filename)
seek = None
eof = False
while not eof:
data, seek, eof = parse_file(filename, SENSORS, seek=seek, oldest=oldest)
if seek == 0:
print(post({'Time': data['Time']-datetime.timedelta(0, 1)}))
if data:
print('%r, %s' % (data['Time'], post(data)))
print('Done')
#time.sleep(10)
exit(0)
oldest = find_oldest()
cfile = find_newest_folder()
seek = None
# Post a blank
print(post({'Time': oldest}))
while True:
time.sleep(1)
filename = find_newest_folder()
if filename != cfile:
seek = None
cfile = filename
print("Starting new folder: %s", filename)
eof = False
while not eof:
data, seek, eof = parse_file(cfile, SENSORS, seek=seek, oldest=oldest)
if data:
oldest = data['Time']
print(post(data))
| mit |
dannyboi104/SickRage | lib/github/tests/Issue214.py | 39 | 3361 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github
import Framework
class Issue214(Framework.TestCase): # https://github.com/jacquev6/PyGithub/issues/214
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_user().get_repo("PyGithub")
self.issue = self.repo.get_issue(1)
def testAssignees(self):
self.assertTrue(self.repo.has_in_assignees('farrd'))
self.assertFalse(self.repo.has_in_assignees('fake'))
def testCollaborators(self):
self.assertTrue(self.repo.has_in_collaborators('farrd'))
self.assertFalse(self.repo.has_in_collaborators('fake'))
self.assertFalse(self.repo.has_in_collaborators('marcmenges'))
self.repo.add_to_collaborators('marcmenges')
self.assertTrue(self.repo.has_in_collaborators('marcmenges'))
self.repo.remove_from_collaborators('marcmenges')
self.assertFalse(self.repo.has_in_collaborators('marcmenges'))
def testEditIssue(self):
self.assertEqual(self.issue.assignee, None)
self.issue.edit(assignee='farrd')
self.assertEqual(self.issue.assignee.login, 'farrd')
self.issue.edit(assignee=None)
self.assertEqual(self.issue.assignee, None)
def testCreateIssue(self):
issue = self.repo.create_issue("Issue created by PyGithub", assignee='farrd')
self.assertEqual(issue.assignee.login, 'farrd')
def testGetIssues(self):
issues = self.repo.get_issues(assignee='farrd')
for issue in issues:
self.assertEqual(issue.assignee.login, 'farrd')
| gpl-3.0 |
NihilistBrew/Dust | typeclasses/objects.py | 10 | 8575 | """
Object
The Object is the "naked" base class for things in the game world.
Note that the default Character, Room and Exit do not inherit from
this Object, but from their respective default implementations in the
evennia library. If you want to use this class as a parent to change
the other types, you can do so by adding this as a multiple
inheritance.
"""
from evennia import DefaultObject
class Object(DefaultObject):
"""
This is the root typeclass object, implementing an in-game Evennia
game object, such as having a location, being able to be
manipulated or looked at, etc. If you create a new typeclass, it
must always inherit from this object (or any of the other objects
in this file, since they all actually inherit from BaseObject, as
seen in src.object.objects).
The BaseObject class implements several hooks tying into the game
engine. By re-implementing these hooks you can control the
system. You should never need to re-implement special Python
methods, such as __init__ and especially never __getattribute__ and
__setattr__ since these are used heavily by the typeclass system
of Evennia and messing with them might well break things for you.
* Base properties defined/available on all Objects
key (string) - name of object
name (string)- same as key
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
player (Player) - controlling player (if any, only set together with
sessid below)
sessid (int, read-only) - session id (if any, only set together with
player above). Use `sessions` handler to get the
Sessions directly.
location (Object) - current location. Is None if this is a room
home (Object) - safety start-location
sessions (list of Sessions, read-only) - returns all sessions connected
to this object
has_player (bool, read-only)- will only return *connected* players
contents (list of Objects, read-only) - returns all objects inside this
object (including exits)
exits (list of Objects, read-only) - returns all exits from this
object, if any
destination (Object) - only set if this object is an exit.
is_superuser (bool, read-only) - True/False if this user is a superuser
* Handlers available
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
object using self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create
a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
sessions - sessions-handler. Get Sessions connected to this
object with sessions.get()
* Helper methods (see src.objects.objects.py for full headers)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None, ignore_errors=False, player=False)
execute_cmd(raw_string)
msg(text=None, **kwargs)
msg_contents(message, exclude=None, from_obj=None, **kwargs)
move_to(destination, quiet=False, emit_to_obj=None, use_destination=True)
copy(new_key=None)
delete()
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hooks (these are instance methods, so args should start with self):
basetype_setup() - only called once, used for behind-the-scenes
setup. Normally not modified.
basetype_posthook_setup() - customization in basetype, after the object
has been created; Normally not modified.
at_object_creation() - only called once, when object is first created.
Object customizations go here.
at_object_delete() - called just before deleting an object. If returning
False, deletion is aborted. Note that all objects
inside a deleted object are automatically moved
to their <home>, they don't need to be removed here.
at_init() - called whenever typeclass is cached from memory,
at least once every server restart/reload
at_cmdset_get(**kwargs) - this is called just before the command handler
requests a cmdset from this object. The kwargs are
not normally used unless the cmdset is created
dynamically (see e.g. Exits).
at_pre_puppet(player)- (player-controlled objects only) called just
before puppeting
at_post_puppet() - (player-controlled objects only) called just
after completing connection player<->object
at_pre_unpuppet() - (player-controlled objects only) called just
before un-puppeting
at_post_unpuppet(player) - (player-controlled objects only) called just
after disconnecting player<->object link
at_server_reload() - called before server is reloaded
at_server_shutdown() - called just before server is fully shut down
at_access(result, accessing_obj, access_type) - called with the result
of a lock access check on this object. Return value
does not affect check result.
at_before_move(destination) - called just before moving object
to the destination. If returns False, move is cancelled.
announce_move_from(destination) - called in old location, just
before move, if obj.move_to() has quiet=False
announce_move_to(source_location) - called in new location, just
after move, if obj.move_to() has quiet=False
at_after_move(source_location) - always called after a move has
been successfully performed.
at_object_leave(obj, target_location) - called when an object leaves
this object in any fashion
at_object_receive(obj, source_location) - called when this object receives
another object
at_traverse(traversing_object, source_loc) - (exit-objects only)
handles all moving across the exit, including
calling the other exit hooks. Use super() to retain
the default functionality.
at_after_traverse(traversing_object, source_location) - (exit-objects only)
called just after a traversal has happened.
at_failed_traverse(traversing_object) - (exit-objects only) called if
traversal fails and property err_traverse is not defined.
at_msg_receive(self, msg, from_obj=None, **kwargs) - called when a message
(via self.msg()) is sent to this obj.
If returns false, aborts send.
at_msg_send(self, msg, to_obj=None, **kwargs) - called when this objects
sends a message to someone via self.msg().
return_appearance(looker) - describes this object. Used by "look"
command by default
at_desc(looker=None) - called by 'look' whenever the
appearance is requested.
at_get(getter) - called after object has been picked up.
Does not stop pickup.
at_drop(dropper) - called when this object has been dropped.
at_say(speaker, message) - by default, called if an object inside this
object speaks
"""
pass
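# The class below is an illustrative sketch only (it is not part of the
# original file): a minimal subclass overriding two of the hooks documented
# above. The name "ExampleHeirloom" and the db attributes used are assumptions
# made for the example.
class ExampleHeirloom(Object):
    """A simple object demonstrating at_object_creation and at_desc."""
    def at_object_creation(self):
        # called only once, when the object is first created
        self.db.desc = "A small heirloom, worn smooth by many hands."
        self.db.times_looked_at = 0
    def at_desc(self, looker=None):
        # called by 'look' whenever the appearance is requested
        self.db.times_looked_at += 1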
| mit |
eayun/ovirt-engine | packaging/setup/ovirt_engine_setup/util.py | 5 | 8581 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utils."""
import gettext
import grp
import pwd
import re
from otopi import util
from otopi import plugin
from otopi import constants as otopicons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
def editConfigContent(
content,
params,
keep_existing=False,
changed_lines=None,
comment_re='[#]*\s*',
param_re='\w+',
new_comment_tpl='{spaces}# {original}',
separator_re='\s*=\s*',
new_line_tpl='{spaces}{param} = {value}',
added_params=None,
):
"""Return edited content of a config file.
Keyword arguments:
content - a list of strings, the content prior to calling us
params - a dict of params/values that should be in the output
If the value for a param is None, param is deleted
keep_existing - if True, existing params are not changed, only missing
ones are added.
changed_lines - an output parameter, a list of dictionaries with
added and removed lines.
comment_re - a regular expression that a comment marker prefixed
to param should match. If a commented param line is found,
a new line will be added after it.
param_re - a regular expression that should match params
new_comment_tpl - a template for a comment. {original} will be replaced
with this template, {spaces} will be replaced with
original whitespace prefix.
separator_re - a regular expression that the separator between
param and value should match
new_line_tpl - a template for a new line. {param} will be replaced
with param, {value} with value.
added_params - an output parameter, a list of params that were added
in the end because they were not found in content.
Params that appear uncommented in the input, are commented, and new
values are added after the commented lines. Params that appear only
commented in the input, the comments are copied as-is, and new lines
are added after the comments. Params that do not appear in the input
are added in the end.
"""
params = params.copy()
pattern = r"""
^
(?P<spaces>\s*)
(?P<comment>{comment_re})
(?P<original>
(?P<param>{param_re})
(?P<separator>{separator_re})
(?P<value>.*)
)
$
""".format(
comment_re=comment_re,
param_re=param_re,
separator_re=separator_re,
)
re_obj = re.compile(flags=re.VERBOSE, pattern=pattern)
# Find params which are uncommented in the input.
uncommented = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not f.group('comment')
):
uncommented.add(f.group('param'))
if changed_lines is None:
changed_lines = []
if added_params is None:
added_params = []
newcontent = []
processed = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not (
f.group('param') in uncommented and
f.group('comment')
)
# If param in uncommented and current line is comment,
# we do not need to process it - we process the uncommented
# line when we see it
):
if (
not f.group('comment') and
(
str(f.group('value')) == str(params[f.group('param')]) or
keep_existing
)
):
# value is not changed, or we do not care. do nothing
processed.add(f.group('param'))
else:
if (
f.group('param') in uncommented and
not f.group('comment')
):
# Add current line, commented, before new line
currentline = new_comment_tpl.format(
spaces=f.group('spaces'),
original=f.group('original'),
)
changed_lines.append(
{
'added': currentline,
'removed': line,
}
)
newcontent.append(currentline)
else:
# Only possible option here is that current line is
# a comment and param is not in uncommented. Keep it.
# Other two options are in "if"s above.
# The last option - param is not in uncommented
# and current line is not a comment - is not possible.
newcontent.append(line)
newline = new_line_tpl.format(
spaces=f.group('spaces'),
param=f.group('param'),
value=params[f.group('param')],
)
changed_lines.append(
{
'added': newline,
}
)
processed.add(f.group('param'))
line = newline
newcontent.append(line)
# Add remaining params at the end
for param, value in params.items():
if param not in processed:
newline = new_line_tpl.format(
spaces='',
param=param,
value=value,
)
newcontent.append(newline)
changed_lines.append(
{
'added': newline,
}
)
added_params.append(param)
return newcontent
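# Minimal usage sketch, added for illustration; the option names and values
# are placeholders, not taken from any real oVirt configuration file.
def _exampleEditConfigContent():
    content = [
        '# ENGINE_PORT = 8080',
        'ENGINE_DEBUG = False',
    ]
    changed = []
    added = []
    newcontent = editConfigContent(
        content=content,
        params={'ENGINE_PORT': 8443, 'ENGINE_DEBUG': True},
        changed_lines=changed,
        added_params=added,
    )
    # ENGINE_PORT is appended after its commented line; the ENGINE_DEBUG line
    # is commented out and 'ENGINE_DEBUG = True' is added after it.
    return newcontent, changed, added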
@util.export
def getUid(user):
return pwd.getpwnam(user)[2]
@util.export
def getGid(group):
return grp.getgrnam(group)[2]
@util.export
def parsePort(port):
try:
port = int(port)
except ValueError:
raise ValueError(
_('Invalid port {number}').format(
number=port,
)
)
if port < 0 or port > 0xffff:
raise ValueError(
_('Invalid number {number}').format(
number=port,
)
)
return port
@util.export
def addExitCode(environment, code, priority=plugin.Stages.PRIORITY_DEFAULT):
environment[
otopicons.BaseEnv.EXIT_CODE
].append(
{
'code': code,
'priority': priority,
}
)
@util.export
def getPackageManager(logger=None):
"""Return a tuple with the package manager printable name string, the mini
implementation class and the sink base class, for the preferred package
manager available in the system.
The only parameter accepted by this function is a logger instance, which
can be omitted (or None) if the user does not want logs.
"""
try:
from otopi import minidnf
minidnf.MiniDNF()
if logger is not None:
logger.debug('Using DNF as package manager')
return 'DNF', minidnf.MiniDNF, minidnf.MiniDNFSinkBase
except (ImportError, RuntimeError):
try:
from otopi import miniyum
# yum does not raise validation exceptions in its constructor,
# so it's not worth instantiating it to test.
if logger is not None:
logger.debug('Using Yum as package manager')
return 'Yum', miniyum.MiniYum, miniyum.MiniYumSinkBase
except ImportError:
raise RuntimeError(
_(
'No supported package manager found in your system'
)
)
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
ivanvladimir/gensim | gensim/test/test_doc2vec.py | 1 | 16473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
from __future__ import with_statement
import logging
import unittest
import os
import tempfile
from six.moves import zip as izip
from collections import namedtuple
import numpy as np
from gensim import utils, matutils
from gensim.models import doc2vec
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class DocsLeeCorpus(object):
def __init__(self, string_tags=False):
self.string_tags = string_tags
def _tag(self, i):
return i if not self.string_tags else '_*%d' % i
def __iter__(self):
with open(datapath('lee_background.cor')) as f:
for i, line in enumerate(f):
yield doc2vec.TaggedDocument(utils.simple_preprocess(line), [self._tag(i)])
list_corpus = list(DocsLeeCorpus())
raw_sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.tst')
class TestDoc2VecModel(unittest.TestCase):
def test_persistence(self):
"""Test storing/loading the entire model."""
model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
model.save(testfile())
self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
def test_load_mmap(self):
"""Test storing/loading the entire model."""
model = doc2vec.Doc2Vec(sentences, min_count=1)
# test storing the internal arrays into separate files
model.save(testfile(), sep_limit=0)
self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
# make sure mmaping the arrays back works, too
self.models_equal(model, doc2vec.Doc2Vec.load(testfile(), mmap='r'))
def test_int_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.docvecs.doctag_syn0), 300)
self.assertEqual(model.docvecs[0].shape, (300,))
self.assertRaises(KeyError, model.__getitem__, '_*0')
def test_string_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = list(DocsLeeCorpus(True))
# force duplicated tags
corpus = corpus[0:10] + corpus
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.docvecs.doctag_syn0), 300)
self.assertEqual(model.docvecs[0].shape, (300,))
self.assertEqual(model.docvecs['_*0'].shape, (300,))
self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
self.assertTrue(max(model.docvecs._int_index(str_key) for str_key in model.docvecs.doctags.keys()) < len(model.docvecs.doctag_syn0))
def test_empty_errors(self):
# no input => "RuntimeError: you must first build vocabulary before training the model"
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
def model_sanity(self, model):
"""Any non-trivial model on DocsLeeCorpus can pass these sanity checks"""
fire1 = 0 # doc 0 sydney fires
fire2 = 8 # doc 8 sydney fires
tennis1 = 6 # doc 6 tennis
# inferred vector should be top10 close to bulk-trained one
doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
self.assertLess(f_rank, 10)
# fire2 should be top30 close to fire1
sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
f2_rank = [docid for docid, sim in sims].index(fire2)
self.assertLess(f2_rank, 30)
# same sims should appear in lookup by vec as by index
doc0_vec = model.docvecs[fire1]
sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
sims2 = [(id, sim) for id, sim in sims2 if id != fire1] # ignore the doc itself
sims = sims[:20]
self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0]) # same doc ids
self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1])) # close-enough dists
# tennis doc should be out-of-place among fire news
self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
# fire docs should be closer than fire-tennis
self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
def test_training(self):
"""Test doc2vec training."""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20)
model.build_vocab(corpus)
self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
model.train(corpus)
self.model_sanity(model)
# build vocab and train in one step; must be the same as above
model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20)
self.models_equal(model, model2)
def test_dbow_hs(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20)
self.model_sanity(model)
def test_dmm_hs(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dms_hs(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dmc_hs(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dbow_neg(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=0, negative=10, min_count=2, iter=20)
self.model_sanity(model)
def test_dmm_neg(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dms_neg(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dmc_neg(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_parallel(self):
"""Test doc2vec parallel training."""
if doc2vec.FAST_VERSION < 0: # don't test the plain NumPy version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)
for workers in [2, 4]:
model = doc2vec.Doc2Vec(corpus, workers=workers)
self.model_sanity(model)
def test_deterministic_hs(self):
"""Test doc2vec results identical with identical RNG seed."""
# hs
model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_neg(self):
"""Test doc2vec results identical with identical RNG seed."""
# neg
model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_dmc(self):
"""Test doc2vec results identical with identical RNG seed."""
# bigger, dmc
model = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4, hs=1, negative=3,
seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4, hs=1, negative=3,
seed=42, workers=1)
self.models_equal(model, model2)
def test_mixed_tag_types(self):
"""Ensure alternating int/string tags don't share indexes in doctag_syn0"""
mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]]) for i, words in enumerate(raw_sentences)]
model = doc2vec.Doc2Vec()
model.build_vocab(mixed_tag_corpus)
expected_length = len(sentences) + len(model.docvecs.doctags) # 9 sentences, 7 unique first tokens
print(model.docvecs.doctags)
print(model.docvecs.count)
self.assertEquals(len(model.docvecs.doctag_syn0), expected_length)
def models_equal(self, model, model2):
# check words/hidden-weights
self.assertEqual(len(model.vocab), len(model2.vocab))
self.assertTrue(np.allclose(model.syn0, model2.syn0))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
# check docvecs
self.assertEqual(len(model.docvecs.doctags), len(model2.docvecs.doctags))
self.assertEqual(len(model.docvecs.offset2doctag), len(model2.docvecs.offset2doctag))
self.assertTrue(np.allclose(model.docvecs.doctag_syn0, model2.docvecs.doctag_syn0))
#endclass TestDoc2VecModel
if not hasattr(TestDoc2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestDoc2VecModel, 'assertLess', assertLess)
# following code is useful for reproducing paragraph-vectors paper sentiment experiments
class ConcatenatedDoc2Vec(object):
"""
Concatenation of multiple models for reproducing the Paragraph Vectors paper.
Models must have exactly-matching vocabulary and document IDs. (Models should
be trained separately; this wrapper just returns concatenated results.)
"""
def __init__(self, models):
self.models = models
if hasattr(models[0], 'docvecs'):
self.docvecs = ConcatenatedDocvecs([model.docvecs for model in models])
def __getitem__(self, token):
return np.concatenate([model[token] for model in self.models])
def infer_vector(self, document, alpha=0.1, min_alpha=0.0001, steps=5):
return np.concatenate([model.infer_vector(document, alpha, min_alpha, steps) for model in self.models])
def train(self, ignored):
pass # train subcomponents individually
class ConcatenatedDocvecs(object):
def __init__(self, models):
self.models = models
def __getitem__(self, token):
return np.concatenate([model[token] for model in self.models])
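# Illustrative sketch (not one of the original tests): pairing a DBOW and a
# DM model as in the Paragraph Vectors experiments. The hyperparameter values
# shown are placeholders, not values prescribed by this module.
def _example_concatenated_model(corpus):
    models = [
        doc2vec.Doc2Vec(corpus, dm=0, size=100, negative=5, min_count=2),
        doc2vec.Doc2Vec(corpus, dm=1, dm_mean=1, size=100, negative=5, min_count=2),
    ]
    combined = ConcatenatedDoc2Vec(models)
    # combined[tag] and combined.infer_vector(words) return 200-dimensional
    # vectors formed by concatenating the two 100-dimensional vectors.
    return combined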
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
def read_su_sentiment_rotten_tomatoes(dirname, lowercase=True):
"""
Read and return documents from the Stanford Sentiment Treebank
corpus (Rotten Tomatoes reviews), from http://nlp.Stanford.edu/sentiment/
Initialize the corpus from a given directory, where
http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip
has been expanded. It's not too big, so it is read entirely into memory.
"""
logging.info("loading corpus from %s" % dirname)
# many mangled chars in sentences (datasetSentences.txt)
chars_sst_mangled = ['à', 'á', 'â', 'ã', 'æ', 'ç', 'è', 'é', 'í',
'í', 'ï', 'ñ', 'ó', 'ô', 'ö', 'û', 'ü']
sentence_fixups = [(char.encode('utf-8').decode('latin1'), char) for char in chars_sst_mangled]
# more junk, and the replace necessary for sentence-phrase consistency
sentence_fixups.extend([
('Â', ''),
('\xa0', ' '),
('-LRB-', '('),
('-RRB-', ')'),
])
# only this junk in phrases (dictionary.txt)
phrase_fixups = [('\xa0', ' ')]
# sentence_id and split are only positive for the full sentences
# read sentences to temp {sentence -> (id,split) dict, to correlate with dictionary.txt
info_by_sentence = {}
with open(os.path.join(dirname, 'datasetSentences.txt'), 'r') as sentences:
with open(os.path.join(dirname, 'datasetSplit.txt'), 'r') as splits:
next(sentences) # legend
next(splits) # legend
for sentence_line, split_line in izip(sentences, splits):
(id, text) = sentence_line.split('\t')
id = int(id)
text = text.rstrip()
for junk, fix in sentence_fixups:
text = text.replace(junk, fix)
(id2, split_i) = split_line.split(',')
assert id == int(id2)
if text not in info_by_sentence: # discard duplicates
info_by_sentence[text] = (id, int(split_i))
# read all phrase text
phrases = [None] * 239232 # known size of phrases
with open(os.path.join(dirname, 'dictionary.txt'), 'r') as phrase_lines:
for line in phrase_lines:
(text, id) = line.split('|')
for junk, fix in phrase_fixups:
text = text.replace(junk, fix)
phrases[int(id)] = text.rstrip() # for 1st pass just string
SentimentPhrase = namedtuple('SentimentPhrase', SentimentDocument._fields + ('sentence_id',))
# add sentiment labels, correlate with sentences
with open(os.path.join(dirname, 'sentiment_labels.txt'), 'r') as sentiments:
next(sentiments) # legend
for line in sentiments:
(id, sentiment) = line.split('|')
id = int(id)
sentiment = float(sentiment)
text = phrases[id]
words = text.split()
if lowercase:
words = [word.lower() for word in words]
(sentence_id, split_i) = info_by_sentence.get(text, (None, 0))
split = [None, 'train', 'test', 'dev'][split_i]
phrases[id] = SentimentPhrase(words, [id], split, sentiment, sentence_id)
assert len([phrase for phrase in phrases if phrase.sentence_id is not None]) == len(info_by_sentence) # all
# counts don't match 8544, 2210, 1101 because 13 TRAIN and 1 DEV sentences are duplicates
assert len([phrase for phrase in phrases if phrase.split == 'train']) == 8531 # 'train'
assert len([phrase for phrase in phrases if phrase.split == 'test']) == 2210 # 'test'
assert len([phrase for phrase in phrases if phrase.split == 'dev']) == 1100 # 'dev'
logging.info("loaded corpus with %i sentences and %i phrases from %s",
len(info_by_sentence), len(phrases), dirname)
return phrases
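# Illustrative usage sketch (editor's addition): loading the expanded Stanford Sentiment
# Treebank directory and splitting phrases by their 'split' field. The directory name and the
# helper name are placeholder assumptions.
def _example_read_sst(dirname='stanfordSentimentTreebank'):
    docs = read_su_sentiment_rotten_tomatoes(dirname)
    # each entry is a SentimentPhrase: (words, tags, split, sentiment, sentence_id)
    train_docs = [d for d in docs if d.split == 'train']
    test_docs = [d for d in docs if d.split == 'test']
    return len(train_docs), len(test_docs)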
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
logging.info("using optimization %s", doc2vec.FAST_VERSION)
unittest.main()
| gpl-3.0 |
40223149/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/__init__.py | 603 | 6082 | ## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
class MissingModule:
def __init__(self, name, info='', urgent=0):
self.name = name
self.info = str(info)
self.urgent = urgent
if urgent:
self.warn()
def __getattr__(self, var):
if not self.urgent:
self.warn()
self.urgent = 1
MissingPygameModule = "%s module not available" % self.name
raise NotImplementedError(MissingPygameModule)
def __nonzero__(self):
return 0
def warn(self):
if self.urgent: type = 'import'
else: type = 'use'
message = '%s %s: %s' % (type, self.name, self.info)
try:
import warnings
if self.urgent: level = 4
else: level = 3
warnings.warn(message, RuntimeWarning, level)
except ImportError:
print(message)
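# Illustrative sketch (editor's addition, hypothetical names): MissingModule stands in for an
# optional submodule that failed to import; touching any attribute raises a clear error.
def _example_missing_module():
    examplemod = MissingModule("examplemod", "SDL support not compiled in", urgent=0)
    try:
        examplemod.init()  # attribute access goes through __getattr__ and fails loudly
    except NotImplementedError as err:
        return str(err)  # "examplemod module not available"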
#we need to import like this, one at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import * #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver
#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)
try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)
try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)
try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)
try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)
try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)
try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)
try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)
try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)
try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)
try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function
try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function
try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)
try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)
#lastly, the "optional" pygame modules
try:
import pygame.font
import pygame.sysfont
pygame.font.SysFont = pygame.sysfont.SysFont
pygame.font.get_fonts = pygame.sysfont.get_fonts
pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)
try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)
#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)
try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)
try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)
#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass
try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass
def packager_imports():
"""
Some additional things that py2app/py2exe will want to see
"""
import OpenGL.GL
'''
#make Rects pickleable
import copyreg
def __rect_constructor(x,y,w,h):
return Rect(x,y,w,h)
def __rect_reduce(r):
assert type(r) == Rect
return __rect_constructor, (r.x, r.y, r.w, r.h)
copyreg.pickle(Rect, __rect_reduce, __rect_constructor)
#cleanup namespace
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
| gpl-3.0 |
mm112287/2015cda-24 | static/Brython3.1.1-20150328-091302/Lib/codecs.py | 739 | 35436 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
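# Illustrative sketch (editor's addition): a minimal codec search function that returns a
# CodecInfo for a hypothetical 'example-utf8-alias' encoding. It simply delegates to UTF-8 and
# is not registered here; a real codec package would pass such a function to register().
def _example_utf8_alias_search(encoding):
    if encoding != 'example-utf8-alias':
        return None
    def _encode(input, errors='strict'):
        return (input.encode('utf-8', errors), len(input))
    def _decode(input, errors='strict'):
        return (bytes(input).decode('utf-8', errors), len(input))
    return CodecInfo(_encode, _decode, name='example-utf8-alias')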
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
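# Illustrative usage sketch (editor's addition): incremental decoding of a UTF-8 byte stream
# that arrives in chunks, with a multi-byte character split across a chunk boundary.
def _example_incremental_decode():
    decoder = getincrementaldecoder('utf-8')()
    chunks = [b'caf', b'\xc3', b'\xa9']           # 'café' split in the middle of 'é'
    text = ''.join(decoder.decode(chunk) for chunk in chunks)
    text += decoder.decode(b'', final=True)       # flush anything still buffered
    return text                                   # 'café'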
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
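# Illustrative usage sketch (editor's addition): wrapping a binary stream with the UTF-8
# StreamWriter/StreamReader pair obtained from the codec registry.
def _example_stream_roundtrip():
    import io
    raw = io.BytesIO()
    writer = getwriter('utf-8')(raw)
    writer.write('héllo\n')                       # text in, encoded bytes out to the stream
    raw.seek(0)
    reader = getreader('utf-8')(raw)
    return reader.readline()                      # 'héllo\n'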
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
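# Illustrative usage sketch (editor's addition): a round-trip through the open() helper above
# with an explicit encoding. The filename is a placeholder assumption.
def _example_open_roundtrip(path='example-utf8.txt'):
    f = open(path, 'w', encoding='utf-8')         # the module-level open() defined above
    f.write('año nuevo\n')
    f.close()
    f = open(path, 'r', encoding='utf-8')
    try:
        return f.read()                           # 'año nuevo\n'
    finally:
        f.close()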
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
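# Illustrative usage sketch (editor's addition): EncodedFile translating between two encodings
# on the fly around an in-memory byte stream.
def _example_encoded_file():
    import io
    backing = io.BytesIO()
    wrapped = EncodedFile(backing, data_encoding='utf-8', file_encoding='latin-1')
    wrapped.write('héllo'.encode('utf-8'))        # the caller supplies utf-8 bytes...
    return backing.getvalue()                     # ...the file receives latin-1: b'h\xe9llo'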
### Helpers for codec lookup
def getencoder(encoding):
    """ Look up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
    """ Look up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
    """ Look up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
    """ Look up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
    """ Look up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
    """ Look up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using a IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using a IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
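# Illustrative usage sketch (editor's addition): encoding and decoding a stream of strings
# piecewise with iterencode()/iterdecode().
def _example_iter_roundtrip():
    parts = ['alpha ', 'beta ', 'gamma']
    encoded = list(iterencode(parts, 'utf-8'))    # a list of bytes chunks
    decoded = ''.join(iterdecode(encoded, 'utf-8'))
    return decoded                                # 'alpha beta gamma'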
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
    multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
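# Illustrative sketch (editor's addition): building an encoding map from a tiny decoding map;
# the duplicated target 0x3f maps back to None, as described in the docstring above.
def _example_make_encoding_map():
    decoding_map = {0x80: 0x20ac, 0x81: 0x3f, 0x82: 0x3f}
    return make_encoding_map(decoding_map)        # {0x20ac: 0x80, 0x3f: None}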
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handler are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| gpl-3.0 |
georgeha/mandelbrot | mandelbrot_core/mandelbrot_pilot_cores.py | 1 | 7097 | __author__ = "George Chantzialexiou"
__copyright__ = "Copyright 2012-2013, The Pilot Project"
__license__ = "MIT"
""" A Mandelbrot Fractal Generator Using Pilot Job
This is an example of a Mandelbrot fractal generator
using the capabilities of the Pilot Job API.
It requires the Python Imaging Library (PIL), which can be easily
installed with 'easy_install PIL'. It also requires the
mandel_lines.py program to generate the parts of the fractal.
The parameters are the following:
imgX, imgY: the dimensions of the mandelbrot image, e.g. 1024, 1024
xBeg, xEnd: the x-axis portion of the (sub-)image to calculate
yBeg, yEnd: the y-axis portion of the (sub-)image to calculate
This module takes the parameters of the Mandelbrot fractal and decomposes
the image into n different parts, where n is the number of cores on the
system. It then runs the Mandelbrot generator code (mandel_lines.py) for
every part. mandel_lines.py creates n images, which are then composed
into the single, whole fractal image.
For every part of the image we create one Compute Unit.
You can run this code from the command line:
python mandelbrot_pilot.py imgX imgY xBeg xEnd yBeg yEnd
"""
import os, sys, radical.pilot
from PIL import Image
import multiprocessing # this library is used to find the number of the cores.
# DBURL defines the MongoDB server URL and has the format mongodb://host:port.
# For the installation of a MongoDB server, refer to http://docs.mongodb.org.
DBURL = os.environ.get("RADICAL_PILOT_DBURL")
if DBURL is None:
print "ERROR: RADICAL_PILOT_DBURL (MongoDB server URL) is not defined."
sys.exit(1)
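# Illustrative sketch (editor's addition): a simplified, even split of the y-range into one
# horizontal band per core. This is only meant to clarify the decomposition described in the
# module docstring; the actual stitching code below uses its own box arithmetic.
def _example_row_bands(yEnd, cores):
    """Return the (y_start, y_stop) band each of the `cores` compute units would cover."""
    return [(int(yEnd * (i - 1) / cores), int(yEnd * i / cores)) for i in range(1, cores + 1)]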
#------------------------------------------------------------------------------
#
def pilot_state_cb(pilot, state):
    """pilot_state_cb() is a callback function. It gets called every
time a ComputePilot changes its state.
"""
if state == radical.pilot.states.FAILED:
print "Compute Pilot '%s' failed, exiting ..." % pilot.uid
sys.exit(1)
elif state == radical.pilot.states.ACTIVE:
print "Compute Pilot '%s' became active!" % (pilot.uid)
#------------------------------------------------------------------------------
#
def unit_state_change_cb(unit, state):
    """unit_state_change_cb() is a callback function. It gets called every
time a ComputeUnit changes its state.
"""
if state == radical.pilot.states.FAILED:
print "Compute Unit '%s' failed ..." % unit.uid
sys.exit(1)
elif state == radical.pilot.states.DONE:
print "Compute Unit '%s' finished with output:" % (unit.uid)
print unit.stdout
#------------------------------------------------------------------------------
#
def main():
try:
# reading the input from user:
args = sys.argv[1:]
if len(args) < 6:
            print "Usage: python %s imgX imgY xBeg xEnd yBeg yEnd" % __file__
sys.exit(-1)
imgX = int(sys.argv[1])
imgY = int(sys.argv[2])
xBeg = int(sys.argv[3])
xEnd = int(sys.argv[4])
yBeg = int(sys.argv[5])
yEnd = int(sys.argv[6])
# end of reading input from the user
# Add the following three lines if you want to run remote
#c = radical.pilot.Context('ssh')
#c.user_id = 'user_id'
#session.add_context(c)
#DBURL = "mongodb://localhost:27017" # this is the default database_url if you run the mongodb on localhost
# here we create a new radical session
DBURL = "mongodb://localhost:27017"
try:
session = radical.pilot.Session(database_url = DBURL)
except Exception, e:
print "An error with mongodb has occured: %s" % (str(e))
return (-1)
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
        print "Initializing Pilot Manager..."
pmgr = radical.pilot.PilotManager(session=session)
# Register our callback with our Pilot Manager. This callback will get
# called every time any of the pilots managed by the PilotManager
# change their state
pmgr.register_callback(pilot_state_cb)
# this describes the requirements and the paramers
pdesc = radical.pilot.ComputePilotDescription()
pdesc.resource = "localhost" # we are running on localhost
pdesc.runtime = 10 # minutes
pdesc.cores = multiprocessing.cpu_count() # we use all the cores we have
pdesc.cleanup = True # delete all the files that are created automatically and we don't need anymore when the job is done
print "Submitting Compute Pilot to PilotManager"
pilot = pmgr.submit_pilots(pdesc)
        print "Initializing Unit Manager"
# Combine the ComputePilot, the ComputeUnits and a scheduler via
# a UnitManager object.
umgr = radical.pilot.UnitManager(
session=session,
scheduler=radical.pilot.SCHED_DIRECT_SUBMISSION)
# Register our callback with the UnitManager. This callback will get
# called every time any of the units managed by the UnitManager
# change their state.
print 'Registering the callbacks so we can keep an eye on the CUs'
umgr.register_callback(unit_state_change_cb)
print "Registering Compute Pilot with Unit Manager"
umgr.add_pilots(pilot)
output_data_list = []
mylist = []
for i in range(1,pdesc.cores+1):
output_data_list.append('mandel_%d.gif' % i)
# -------- BEGIN USER DEFINED CU DESCRIPTION --------- #
cudesc = radical.pilot.ComputeUnitDescription()
cudesc.environment = {"mandelx": "%d" % imgX, "mandely": "%d" % imgY, "xBeg": "%d" % xBeg,
"xEnd": "%d" % xEnd, "yBeg": "%d" % yBeg, "yEnd": "%d" % yEnd, "cores": "%d" % pdesc.cores, "iter": "%d" % i }
cudesc.executable = "python"
cudesc.arguments = ['mandel_lines.py','$mandelx','$mandely','$xBeg','$xEnd','$yBeg','$yEnd','$cores','$iter']
cudesc.input_data = ['mandel_lines.py']
cudesc.output_data = output_data_list[i-1]
mylist.append(cudesc)
# -------- END USER DEFINED CU DESCRIPTION --------- #
print 'Submitting the CU to the Unit Manager...'
mylist_units = umgr.submit_units(mylist)
# wait for all units to finish
umgr.wait_units()
print "All Compute Units completed successfully! Now.."
# stitch together the final image
fullimage = Image.new("RGB", (xEnd-xBeg, yEnd-yBeg))
        print "Stitching together the full fractal into mandelbrot_full.gif"
for i in range(1,pdesc.cores+1):
partimage = Image.open('mandel_%d.gif' % i)
box_top = (xBeg, int((yEnd*(i-1))/pdesc.cores), xEnd ,int((yEnd*(i+1))/pdesc.cores))
mandel_part = partimage.crop(box_top)
fullimage.paste(mandel_part, box_top)
fullimage.save("mandelbrot_full.gif", "GIF")
        print 'The image is now saved in the working directory.'
session.close()
print "Session closed, exiting now ..."
sys.exit(0)
except Exception as e:
print "AN ERROR OCCURRED: %s" % ((str(e)))
return(-1)
#------------------------------------------------------------------------------
#
if __name__ == "__main__":
sys.exit(main())
#
#------------------------------------------------------------------------------
| mit |
nexiles/odoo | addons/stock/procurement.py | 227 | 22183 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
from psycopg2 import OperationalError
import openerp
class procurement_group(osv.osv):
_inherit = 'procurement.group'
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner')
}
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
result = super(procurement_rule, self)._get_action(cr, uid, context=context)
return result + [('move', _('Move From Another Location'))]
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids):
res += [x.id for x in route.pull_ids]
return res
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'),
'location_src_id': fields.many2one('stock.location', 'Source Location',
help="Source location is action=move"),
'route_id': fields.many2one('stock.location.route', 'Route',
help="If route_id is False, the rule is global"),
'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
'delay': fields.integer('Number of Days'),
'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'),
'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
}
_defaults = {
'procure_method': 'make_to_stock',
'propagate': True,
'delay': 0,
}
class procurement_order(osv.osv):
_inherit = "procurement.order"
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with sale_service
'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'move' and procurement.move_ids:
self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context)
def cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
ctx = context.copy()
#set the context for the propagation of the procurement cancelation
ctx['cancel_procurement'] = True
for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx):
self.propagate_cancel(cr, uid, procurement, context=ctx)
return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
def _find_parent_locations(self, cr, uid, procurement, context=None):
location = procurement.location_id
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
return {'value': {'location_id': warehouse.lot_stock_id.id}}
return {}
def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
'''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
procurement_route_ids = [x.id for x in procurement.route_ids]
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
return res
def _find_suitable_rule(self, cr, uid, procurement, context=None):
rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
if not rule_id:
#a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
rule_id = rule_id and rule_id[0] or False
return rule_id
def _run_move_create(self, cr, uid, procurement, context=None):
''' Returns a dictionary of values that will be used to create a stock move from a procurement.
This function assumes that the given procurement has a rule (action == 'move') set on it.
:param procurement: browse record
:rtype: dictionary
'''
newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
group_id = False
if procurement.rule_id.group_propagation_option == 'propagate':
group_id = procurement.group_id and procurement.group_id.id or False
elif procurement.rule_id.group_propagation_option == 'fixed':
group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
#it is possible that we've already got some move done, so check for the done qty and create
#a new move with the correct qty
already_done_qty = 0
already_done_qty_uos = 0
for move in procurement.move_ids:
already_done_qty += move.product_uom_qty if move.state == 'done' else 0
already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0
qty_left = max(procurement.product_qty - already_done_qty, 0)
qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0)
vals = {
'name': procurement.name,
'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
'product_id': procurement.product_id.id,
'product_uom': procurement.product_uom.id,
'product_uom_qty': qty_left,
'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left,
'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id,
'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
'location_id': procurement.rule_id.location_src_id.id,
'location_dest_id': procurement.location_id.id,
'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
'procurement_id': procurement.id,
'rule_id': procurement.rule_id.id,
'procure_method': procurement.rule_id.procure_method,
'origin': procurement.origin,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in procurement.route_ids],
'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
'date': newdate,
'date_expected': newdate,
'propagate': procurement.rule_id.propagate,
'priority': procurement.priority,
}
return vals
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'move':
if not procurement.rule_id.location_src_id:
self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
return False
move_obj = self.pool.get('stock.move')
move_dict = self._run_move_create(cr, uid, procurement, context=context)
#create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
return True
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def run(self, cr, uid, ids, autocommit=False, context=None):
new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')]
res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context)
#after all the procurements are run, check if some created a draft stock move that needs to be confirmed
        #(we do that in batch because it speeds up the picking assignment and the picking state computation)
move_to_confirm_ids = []
for procurement in self.browse(cr, uid, new_ids, context=context):
if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
if move_to_confirm_ids:
self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
return res
def _check(self, cr, uid, procurement, context=None):
''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
moves are done/cancel and if the requested quantity is moved.
'''
if procurement.rule_id and procurement.rule_id.action == 'move':
uom_obj = self.pool.get('product.uom')
# In case Phantom BoM splits only into procurements
if not procurement.move_ids:
return True
cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids]
done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids]
at_least_one_cancel = any(cancel_test_list)
all_done_or_cancel = all(done_cancel_test_list)
all_cancel = all(cancel_test_list)
if not all_done_or_cancel:
return False
elif all_done_or_cancel and not all_cancel:
return True
elif all_cancel:
self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context)
return False
return super(procurement_order, self)._check(cr, uid, procurement, context)
def do_view_pickings(self, cr, uid, ids, context=None):
'''
This function returns an action that display the pickings of the procurements belonging
to the same procurement group of given ids.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
and the availability of moves. This function is intended to be run for all the companies at the same time, so
        we run functions as SUPERUSER to avoid intercompany and access rights issues.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
move_obj = self.pool.get('stock.move')
#Minimum stock rules
self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
#Search all confirmed stock_moves and try to assign them
confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context)
for x in xrange(0, len(confirmed_ids), 100):
move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
date_planned = start_date + relativedelta(days=orderpoint.product_id.seller_delay or 0.0)
return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
return {
'name': orderpoint.name,
'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
'product_id': orderpoint.product_id.id,
'product_qty': product_qty,
'company_id': orderpoint.company_id.id,
'product_uom': orderpoint.product_uom.id,
'location_id': orderpoint.location_id.id,
'origin': orderpoint.name,
'warehouse_id': orderpoint.warehouse_id.id,
'orderpoint_id': orderpoint.id,
'group_id': orderpoint.group_id.id,
}
def _product_virtual_get(self, cr, uid, order_point):
product_obj = self.pool.get('product.product')
return product_obj._product_available(cr, uid,
[order_point.product_id.id],
context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available']
    def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Create procurement based on Orderpoint
:param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
'''
if context is None:
context = {}
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
procurement_obj = self.pool.get('procurement.order')
dom = company_id and [('company_id', '=', company_id)] or []
orderpoint_ids = orderpoint_obj.search(cr, uid, dom)
prev_ids = []
while orderpoint_ids:
ids = orderpoint_ids[:100]
del orderpoint_ids[:100]
for op in orderpoint_obj.browse(cr, uid, ids, context=context):
try:
prods = self._product_virtual_get(cr, uid, op)
if prods is None:
continue
if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) < 0:
qty = max(op.product_min_qty, op.product_max_qty) - prods
reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0
if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0:
qty += op.qty_multiple - reste
if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) <= 0:
continue
qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context)
qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding)
if qty_rounded > 0:
proc_id = procurement_obj.create(cr, uid,
self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context),
context=context)
self.check(cr, uid, [proc_id])
self.run(cr, uid, [proc_id])
if use_new_cursor:
cr.commit()
except OperationalError:
if use_new_cursor:
orderpoint_ids.append(op.id)
cr.rollback()
continue
else:
raise
if use_new_cursor:
cr.commit()
if prev_ids == ids:
break
else:
prev_ids = ids
if use_new_cursor:
cr.commit()
cr.close()
return {}
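    # Worked example of the replenishment quantity computed in _procure_orderpoint_confirm
    # above (the figures are illustrative): with product_min_qty=10, product_max_qty=50,
    # qty_multiple=12 and a virtual stock of 4, qty = max(10, 50) - 4 = 46; the remainder
    # 46 % 12 = 10 is topped up to the next multiple, giving 46 + (12 - 10) = 48, before
    # procurements already linked to the orderpoint are subtracted via subtract_procurements().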
| agpl-3.0 |
mailboxly/po | pymongo/auth.py | 23 | 15619 | # Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication helpers."""
import hmac
HAVE_KERBEROS = True
try:
import kerberos
except ImportError:
HAVE_KERBEROS = False
from base64 import standard_b64decode, standard_b64encode
from collections import namedtuple
from hashlib import md5, sha1
from random import SystemRandom
from bson.binary import Binary
from bson.py3compat import b, string_type, _unicode, PY3
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
MECHANISMS = frozenset(
['GSSAPI', 'MONGODB-CR', 'MONGODB-X509', 'PLAIN', 'SCRAM-SHA-1', 'DEFAULT'])
"""The authentication mechanisms supported by PyMongo."""
MongoCredential = namedtuple(
'MongoCredential',
['mechanism', 'source', 'username', 'password', 'mechanism_properties'])
"""A hashable namedtuple of values used for authentication."""
GSSAPIProperties = namedtuple('GSSAPIProperties', ['service_name'])
"""Mechanism properties for GSSAPI authentication."""
def _build_credentials_tuple(mech, source, user, passwd, extra):
"""Build and return a mechanism specific credentials tuple.
"""
user = _unicode(user)
if mech == 'GSSAPI':
properties = extra.get('authmechanismproperties', {})
service_name = properties.get('SERVICE_NAME', 'mongodb')
props = GSSAPIProperties(service_name=service_name)
# No password, source is always $external.
return MongoCredential(mech, '$external', user, None, props)
elif mech == 'MONGODB-X509':
return MongoCredential(mech, '$external', user, None, None)
else:
if passwd is None:
raise ConfigurationError("A password is required.")
return MongoCredential(mech, source, user, _unicode(passwd), None)
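# Hedged usage sketch (the database/user/password values below are illustrative, not
# taken from the driver): the default branch keeps the source database and password,
# while GSSAPI always authenticates against $external with no password.
#   _build_credentials_tuple('DEFAULT', 'admin', 'jesse', 'pencil', {})
#       -> MongoCredential('DEFAULT', 'admin', u'jesse', u'pencil', None)
#   _build_credentials_tuple('GSSAPI', 'admin', 'jesse@EXAMPLE.COM', None, {})
#       -> MongoCredential('GSSAPI', '$external', u'jesse@EXAMPLE.COM', None,
#                          GSSAPIProperties(service_name='mongodb'))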
if PY3:
def _xor(fir, sec):
"""XOR two byte strings together (python 3.x)."""
return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)])
_from_bytes = int.from_bytes
_to_bytes = int.to_bytes
else:
from binascii import (hexlify as _hexlify,
unhexlify as _unhexlify)
def _xor(fir, sec):
"""XOR two byte strings together (python 2.x)."""
return b"".join([chr(ord(x) ^ ord(y)) for x, y in zip(fir, sec)])
def _from_bytes(value, dummy, int=int, _hexlify=_hexlify):
"""An implementation of int.from_bytes for python 2.x."""
return int(_hexlify(value), 16)
def _to_bytes(value, dummy0, dummy1, _unhexlify=_unhexlify):
"""An implementation of int.to_bytes for python 2.x."""
return _unhexlify('%040x' % value)
try:
# The fastest option, if it's been compiled to use OpenSSL's HMAC.
from backports.pbkdf2 import pbkdf2_hmac
def _hi(data, salt, iterations):
return pbkdf2_hmac('sha1', data, salt, iterations)
except ImportError:
try:
# Python 2.7.8+, or Python 3.4+.
from hashlib import pbkdf2_hmac
def _hi(data, salt, iterations):
return pbkdf2_hmac('sha1', data, salt, iterations)
except ImportError:
def _hi(data, salt, iterations):
"""A simple implementation of PBKDF2."""
mac = hmac.HMAC(data, None, sha1)
def _digest(msg, mac=mac):
"""Get a digest for msg."""
_mac = mac.copy()
_mac.update(msg)
return _mac.digest()
from_bytes = _from_bytes
to_bytes = _to_bytes
_u1 = _digest(salt + b'\x00\x00\x00\x01')
_ui = from_bytes(_u1, 'big')
for _ in range(iterations - 1):
_u1 = _digest(_u1)
_ui ^= from_bytes(_u1, 'big')
return to_bytes(_ui, 20, 'big')
try:
from hmac import compare_digest
except ImportError:
if PY3:
def _xor_bytes(a, b):
return a ^ b
else:
def _xor_bytes(a, b, _ord=ord):
return _ord(a) ^ _ord(b)
# Python 2.x < 2.7.7 and Python 3.x < 3.3
# References:
# - http://bugs.python.org/issue14532
# - http://bugs.python.org/issue14955
# - http://bugs.python.org/issue15061
def compare_digest(a, b, _xor_bytes=_xor_bytes):
left = None
right = b
if len(a) == len(b):
left = a
result = 0
if len(a) != len(b):
left = b
result = 1
for x, y in zip(left, right):
result |= _xor_bytes(x, y)
return result == 0
def _parse_scram_response(response):
"""Split a scram response into key, value pairs."""
return dict(item.split(b"=", 1) for item in response.split(b","))
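# Worked example (the server-first payload is illustrative):
#   _parse_scram_response(b"r=abc,s=c2FsdA==,i=4096")
#   -> {b'r': b'abc', b's': b'c2FsdA==', b'i': b'4096'}
# Each item is split on the first '=' only, so base64 padding in values survives.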
def _authenticate_scram_sha1(credentials, sock_info):
"""Authenticate using SCRAM-SHA-1."""
username = credentials.username
password = credentials.password
source = credentials.source
# Make local
_hmac = hmac.HMAC
_sha1 = sha1
user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
nonce = standard_b64encode(
(("%s" % (SystemRandom().random(),))[2:]).encode("utf-8"))
first_bare = b"n=" + user + b",r=" + nonce
cmd = SON([('saslStart', 1),
('mechanism', 'SCRAM-SHA-1'),
('payload', Binary(b"n,," + first_bare)),
('autoAuthorize', 1)])
res = sock_info.command(source, cmd)
server_first = res['payload']
parsed = _parse_scram_response(server_first)
iterations = int(parsed[b'i'])
salt = parsed[b's']
rnonce = parsed[b'r']
if not rnonce.startswith(nonce):
raise OperationFailure("Server returned an invalid nonce.")
without_proof = b"c=biws,r=" + rnonce
salted_pass = _hi(_password_digest(username, password).encode("utf-8"),
standard_b64decode(salt),
iterations)
client_key = _hmac(salted_pass, b"Client Key", _sha1).digest()
stored_key = _sha1(client_key).digest()
auth_msg = b",".join((first_bare, server_first, without_proof))
client_sig = _hmac(stored_key, auth_msg, _sha1).digest()
client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig))
client_final = b",".join((without_proof, client_proof))
server_key = _hmac(salted_pass, b"Server Key", _sha1).digest()
server_sig = standard_b64encode(
_hmac(server_key, auth_msg, _sha1).digest())
cmd = SON([('saslContinue', 1),
('conversationId', res['conversationId']),
('payload', Binary(client_final))])
res = sock_info.command(source, cmd)
parsed = _parse_scram_response(res['payload'])
if not compare_digest(parsed[b'v'], server_sig):
raise OperationFailure("Server returned an invalid signature.")
# Depending on how it's configured, Cyrus SASL (which the server uses)
# requires a third empty challenge.
if not res['done']:
cmd = SON([('saslContinue', 1),
('conversationId', res['conversationId']),
('payload', Binary(b''))])
res = sock_info.command(source, cmd)
if not res['done']:
raise OperationFailure('SASL conversation failed to complete.')
def _password_digest(username, password):
"""Get a password digest to use for authentication.
"""
if not isinstance(password, string_type):
raise TypeError("password must be an "
"instance of %s" % (string_type.__name__,))
if len(password) == 0:
raise ValueError("password can't be empty")
if not isinstance(username, string_type):
raise TypeError("password must be an "
"instance of %s" % (string_type.__name__,))
md5hash = md5()
data = "%s:mongo:%s" % (username, password)
md5hash.update(data.encode('utf-8'))
return _unicode(md5hash.hexdigest())
def _auth_key(nonce, username, password):
"""Get an auth key to use for authentication.
"""
digest = _password_digest(username, password)
md5hash = md5()
data = "%s%s%s" % (nonce, username, digest)
md5hash.update(data.encode('utf-8'))
return _unicode(md5hash.hexdigest())
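# In other words, the MONGODB-CR key computed above is
#   md5hex(nonce + username + md5hex(username + ":mongo:" + password))
# where md5hex denotes the hexadecimal MD5 digest.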
def _authenticate_gssapi(credentials, sock_info):
"""Authenticate using GSSAPI.
"""
if not HAVE_KERBEROS:
raise ConfigurationError('The "kerberos" module must be '
'installed to use GSSAPI authentication.')
try:
username = credentials.username
gsn = credentials.mechanism_properties.service_name
# Starting here and continuing through the while loop below - establish
# the security context. See RFC 4752, Section 3.1, first paragraph.
host = sock_info.address[0]
result, ctx = kerberos.authGSSClientInit(
gsn + '@' + host, gssflags=kerberos.GSS_C_MUTUAL_FLAG)
if result != kerberos.AUTH_GSS_COMPLETE:
raise OperationFailure('Kerberos context failed to initialize.')
try:
# pykerberos uses a weird mix of exceptions and return values
# to indicate errors.
# 0 == continue, 1 == complete, -1 == error
# Only authGSSClientStep can return 0.
if kerberos.authGSSClientStep(ctx, '') != 0:
raise OperationFailure('Unknown kerberos '
'failure in step function.')
# Start a SASL conversation with mongod/s
# Note: pykerberos deals with base64 encoded byte strings.
# Since mongo accepts base64 strings as the payload we don't
# have to use bson.binary.Binary.
payload = kerberos.authGSSClientResponse(ctx)
cmd = SON([('saslStart', 1),
('mechanism', 'GSSAPI'),
('payload', payload),
('autoAuthorize', 1)])
response = sock_info.command('$external', cmd)
# Limit how many times we loop to catch protocol / library issues
for _ in range(10):
result = kerberos.authGSSClientStep(ctx,
str(response['payload']))
if result == -1:
raise OperationFailure('Unknown kerberos '
'failure in step function.')
payload = kerberos.authGSSClientResponse(ctx) or ''
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', payload)])
response = sock_info.command('$external', cmd)
if result == kerberos.AUTH_GSS_COMPLETE:
break
else:
raise OperationFailure('Kerberos '
'authentication failed to complete.')
# Once the security context is established actually authenticate.
# See RFC 4752, Section 3.1, last two paragraphs.
if kerberos.authGSSClientUnwrap(ctx,
str(response['payload'])) != 1:
raise OperationFailure('Unknown kerberos '
'failure during GSS_Unwrap step.')
if kerberos.authGSSClientWrap(ctx,
kerberos.authGSSClientResponse(ctx),
username) != 1:
raise OperationFailure('Unknown kerberos '
'failure during GSS_Wrap step.')
payload = kerberos.authGSSClientResponse(ctx)
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', payload)])
sock_info.command('$external', cmd)
finally:
kerberos.authGSSClientClean(ctx)
except kerberos.KrbError as exc:
raise OperationFailure(str(exc))
def _authenticate_plain(credentials, sock_info):
"""Authenticate using SASL PLAIN (RFC 4616)
"""
source = credentials.source
username = credentials.username
password = credentials.password
payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8')
cmd = SON([('saslStart', 1),
('mechanism', 'PLAIN'),
('payload', Binary(payload)),
('autoAuthorize', 1)])
sock_info.command(source, cmd)
def _authenticate_cram_md5(credentials, sock_info):
"""Authenticate using CRAM-MD5 (RFC 2195)
"""
source = credentials.source
username = credentials.username
password = credentials.password
# The password used as the mac key is the
# same as what we use for MONGODB-CR
passwd = _password_digest(username, password)
cmd = SON([('saslStart', 1),
('mechanism', 'CRAM-MD5'),
('payload', Binary(b'')),
('autoAuthorize', 1)])
response = sock_info.command(source, cmd)
# MD5 as implicit default digest for digestmod is deprecated
# in python 3.4
mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=md5)
mac.update(response['payload'])
challenge = username.encode('utf-8') + b' ' + b(mac.hexdigest())
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', Binary(challenge))])
sock_info.command(source, cmd)
def _authenticate_x509(credentials, sock_info):
"""Authenticate using MONGODB-X509.
"""
query = SON([('authenticate', 1),
('mechanism', 'MONGODB-X509'),
('user', credentials.username)])
sock_info.command('$external', query)
def _authenticate_mongo_cr(credentials, sock_info):
"""Authenticate using MONGODB-CR.
"""
source = credentials.source
username = credentials.username
password = credentials.password
# Get a nonce
response = sock_info.command(source, {'getnonce': 1})
nonce = response['nonce']
key = _auth_key(nonce, username, password)
# Actually authenticate
query = SON([('authenticate', 1),
('user', username),
('nonce', nonce),
('key', key)])
sock_info.command(source, query)
def _authenticate_default(credentials, sock_info):
if sock_info.max_wire_version >= 3:
return _authenticate_scram_sha1(credentials, sock_info)
else:
return _authenticate_mongo_cr(credentials, sock_info)
_AUTH_MAP = {
'CRAM-MD5': _authenticate_cram_md5,
'GSSAPI': _authenticate_gssapi,
'MONGODB-CR': _authenticate_mongo_cr,
'MONGODB-X509': _authenticate_x509,
'PLAIN': _authenticate_plain,
'SCRAM-SHA-1': _authenticate_scram_sha1,
'DEFAULT': _authenticate_default,
}
def authenticate(credentials, sock_info):
"""Authenticate sock_info."""
mechanism = credentials.mechanism
auth_func = _AUTH_MAP.get(mechanism)
auth_func(credentials, sock_info)
def logout(source, sock_info):
"""Log out from a database."""
sock_info.command(source, {'logout': 1})
| agpl-3.0 |
LeZhang2016/openthread | tests/scripts/thread-cert/ipv6.py | 11 | 35649 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import abc
import io
import struct
import sys
from binascii import hexlify
from ipaddress import ip_address
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
# Next headers for IPv6 protocols
IPV6_NEXT_HEADER_HOP_BY_HOP = 0
IPV6_NEXT_HEADER_TCP = 6
IPV6_NEXT_HEADER_UDP = 17
IPV6_NEXT_HEADER_ICMP = 58
UPPER_LAYER_PROTOCOLS = [
IPV6_NEXT_HEADER_TCP,
IPV6_NEXT_HEADER_UDP,
IPV6_NEXT_HEADER_ICMP,
]
# ICMP Protocol codes
ICMP_DESTINATION_UNREACHABLE = 1
ICMP_ECHO_REQUEST = 128
ICMP_ECHO_RESPONSE = 129
# Default hop limit for IPv6
HOP_LIMIT_DEFAULT = 64
def calculate_checksum(data):
""" Calculate checksum from data bytes.
How to calculate checksum (RFC 2460):
https://tools.ietf.org/html/rfc2460#page-27
Args:
data (bytes): input data from which checksum will be calculated
Returns:
int: calculated checksum
"""
# Create halfwords from data bytes. Example: data[0] = 0x01, data[1] = 0xb2 => 0x01b2
halfwords = [((byte0 << 8) | byte1) for byte0, byte1 in zip_longest(data[::2], data[1::2], fillvalue=0x00)]
checksum = 0
for halfword in halfwords:
checksum += halfword
checksum = (checksum & 0xFFFF) + (checksum >> 16)
checksum ^= 0xFFFF
if checksum == 0:
return 0xFFFF
else:
return checksum
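# Worked example (illustrative input): calculate_checksum(bytes([0x01, 0xb2, 0x00, 0x03]))
# folds the halfwords 0x01b2 and 0x0003 into 0x01b5 and returns its one's complement,
# 0xfe4a.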
class PacketFactory(object):
""" Interface for classes that produce objects from data. """
def parse(self, data, message_info):
""" Convert data to object.
Args:
data (BytesIO)
message_info (MessageInfo)
"""
raise NotImplementedError
class BuildableFromBytes(object):
""" Interface for classes which can be built from bytes. """
@classmethod
def from_bytes(cls, data):
""" Convert data to object.
Args:
data (bytes)
"""
raise NotImplementedError
class ConvertibleToBytes(object):
""" Interface for classes which can be converted to bytes. """
def to_bytes(self):
""" Convert object to data.
Returns:
bytes
"""
raise NotImplementedError
def __len__(self):
""" Length of data (in bytes).
Returns:
int
"""
raise NotImplementedError
class Header(object):
""" Interface for header classes. """
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def type(self):
""" Number which can be used in the next header field in IPv6 header or next headers.
Returns:
int
"""
class ExtensionHeader(object):
""" Base for classes representing Extension Headers in IPv6 packets. """
def __init__(self, next_header, hdr_ext_len=0):
self.next_header = next_header
self.hdr_ext_len = hdr_ext_len
class UpperLayerProtocol(Header, ConvertibleToBytes):
""" Base for classes representing upper layer protocol payload in IPv6 packets. """
def __init__(self, header):
self.header = header
@property
def checksum(self):
""" Return checksum from upper layer protocol header. """
return self.header.checksum
@checksum.setter
def checksum(self, value):
""" Set checksum value in upper layer protocol header. """
self.header.checksum = value
def is_valid_checksum(self):
""" Return information if set checksum is valid.
It is not possible to get zero from checksum calculation.
Zero indicates invalid checksum value.
Returns:
bool
"""
return self.checksum != 0
class IPv6PseudoHeader(ConvertibleToBytes):
""" Class representing IPv6 pseudo header which is required to calculate
upper layer protocol (like e.g. UDP or ICMPv6) checksum.
This class is used only during upper layer protocol checksum calculation. Do not use it outside of this module.
"""
def __init__(self, source_address, destination_address, payload_length, next_header):
self._source_address = self._convert_to_ipaddress(source_address)
self._destination_address = self._convert_to_ipaddress(destination_address)
self.payload_length = payload_length
self.next_header = next_header
def _convert_to_ipaddress(self, value):
if isinstance(value, bytearray):
value = bytes(value)
elif isinstance(value, str) and sys.version_info[0] == 2:
value = value.decode("utf-8")
return ip_address(value)
@property
def source_address(self):
return self._source_address
@source_address.setter
def source_address(self, value):
self._source_address = self._convert_to_ipaddress(value)
@property
def destination_address(self):
return self._destination_address
@destination_address.setter
def destination_address(self, value):
        self._destination_address = self._convert_to_ipaddress(value)
def to_bytes(self):
data = bytearray()
data += self.source_address.packed
data += self.destination_address.packed
data += struct.pack(">I", self.payload_length)
data += struct.pack(">I", self.next_header)
return data
class IPv6Header(ConvertibleToBytes, BuildableFromBytes):
""" Class representing IPv6 packet header. """
_version = 6
_header_length = 40
def __init__(self, source_address, destination_address, traffic_class=0, flow_label=0, hop_limit=64,
payload_length=0, next_header=0):
self.version = self._version
self._source_address = self._convert_to_ipaddress(source_address)
self._destination_address = self._convert_to_ipaddress(destination_address)
self.traffic_class = traffic_class
self.flow_label = flow_label
self.hop_limit = hop_limit
self.payload_length = payload_length
self.next_header = next_header
def _convert_to_ipaddress(self, value):
if isinstance(value, bytearray):
value = bytes(value)
elif isinstance(value, str) and sys.version_info[0] == 2:
value = value.decode("utf-8")
return ip_address(value)
@property
def source_address(self):
return self._source_address
@source_address.setter
def source_address(self, value):
self._source_address = self._convert_to_ipaddress(value)
@property
def destination_address(self):
return self._destination_address
def to_bytes(self):
data = bytearray([
((self.version & 0x0F) << 4) | ((self.traffic_class >> 4) & 0x0F),
((self.traffic_class & 0x0F) << 4) | ((self.flow_label >> 16) & 0x0F),
((self.flow_label >> 8) & 0xFF),
((self.flow_label & 0xFF))
])
data += struct.pack(">H", self.payload_length)
data += bytearray([self.next_header, self.hop_limit])
data += self.source_address.packed
data += self.destination_address.packed
return data
@classmethod
def from_bytes(cls, data):
b = bytearray(data.read(4))
version = (b[0] >> 4) & 0x0F
traffic_class = ((b[0] & 0x0F) << 4) | ((b[1] >> 4) & 0x0F)
flow_label = ((b[1] & 0x0F) << 16) | (b[2] << 8) | b[3]
payload_length = struct.unpack(">H", data.read(2))[0]
next_header = ord(data.read(1))
hop_limit = ord(data.read(1))
src_addr = bytearray(data.read(16))
dst_addr = bytearray(data.read(16))
return cls(src_addr,
dst_addr,
traffic_class,
flow_label,
hop_limit,
payload_length,
next_header)
def __repr__(self):
return "IPv6Header(source_address={}, destination_address={}, next_header={}, payload_length={}, \
hop_limit={}, traffic_class={}, flow_label={})".format(self.source_address.compressed,
self.destination_address.compressed,
self.next_header,
self.payload_length,
self.hop_limit,
self.traffic_class,
self.flow_label)
def __len__(self):
return self._header_length
class IPv6Packet(ConvertibleToBytes):
""" Class representing IPv6 packet.
IPv6 packet consists of IPv6 header, optional extension header, and upper layer protocol.
IPv6 packet
+-------------+----------------------------------+----------------------------------------------+
| | | |
| IPv6 header | extension headers (zero or more) | upper layer protocol (e.g. UDP, TCP, ICMPv6) |
| | | |
+-------------+----------------------------------+----------------------------------------------+
Extension headers:
- HopByHop
- Routing header (not implemented in this module)
Upper layer protocols:
- ICMPv6
- UDP
- TCP (not implemented in this module)
Example:
IPv6 packet construction without extension headers:
ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41]))))
IPv6 packet construction with extension headers:
ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41])),
[HopByHop(options=[
HopByHopOption(HopByHopOptionHeader(_type=0x6d),
MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))
])])
"""
def __init__(self, ipv6_header, upper_layer_protocol, extension_headers=None):
self.ipv6_header = ipv6_header
self.upper_layer_protocol = upper_layer_protocol
self.extension_headers = extension_headers if extension_headers is not None else []
self._update_next_header_values_in_headers()
if not upper_layer_protocol.is_valid_checksum():
self.upper_layer_protocol.checksum = self.calculate_checksum()
def _validate_checksum(self):
checksum = self.calculate_checksum()
if self.upper_layer_protocol.checksum != checksum:
raise RuntimeError("Could not create IPv6 packet. "
"Invalid checksum: {}!={}".format(self.upper_layer_protocol.checksum, checksum))
self.upper_layer_protocol.checksum = checksum
def _update_payload_length_value_in_ipv6_header(self):
self.ipv6_header.payload_length = len(self.upper_layer_protocol) + \
sum([len(extension_header) for extension_header in self.extension_headers])
def _update_next_header_values_in_headers(self):
last_header = self.ipv6_header
for extension_header in self.extension_headers:
last_header.next_header = extension_header.type
last_header = extension_header
last_header.next_header = self.upper_layer_protocol.type
def calculate_checksum(self):
saved_checksum = self.upper_layer_protocol.checksum
self.upper_layer_protocol.checksum = 0
upper_layer_protocol_bytes = self.upper_layer_protocol.to_bytes()
self.upper_layer_protocol.checksum = saved_checksum
pseudo_header = IPv6PseudoHeader(self.ipv6_header.source_address,
self.ipv6_header.destination_address,
len(upper_layer_protocol_bytes),
self.upper_layer_protocol.type)
return calculate_checksum(pseudo_header.to_bytes() + upper_layer_protocol_bytes)
def to_bytes(self):
self._update_payload_length_value_in_ipv6_header()
self._update_next_header_values_in_headers()
self.upper_layer_protocol.checksum = self.calculate_checksum()
ipv6_packet = self.ipv6_header.to_bytes()
for extension_header in self.extension_headers:
ipv6_packet += extension_header.to_bytes()
ipv6_packet += self.upper_layer_protocol.to_bytes()
return ipv6_packet
def __repr__(self):
return "IPv6Packet(header={}, upper_layer_protocol={})".format(self.ipv6_header, self.upper_layer_protocol)
class UDPHeader(ConvertibleToBytes, BuildableFromBytes):
""" Class representing UDP datagram header.
This header is required to construct UDP datagram.
"""
_header_length = 8
def __init__(self, src_port, dst_port, payload_length=0, checksum=0):
self.src_port = src_port
self.dst_port = dst_port
self._payload_length = payload_length
self.checksum = checksum
@property
def type(self):
return 17
@property
def payload_length(self):
return self._payload_length
@payload_length.setter
def payload_length(self, value):
self._payload_length = self._header_length + value
def to_bytes(self):
data = struct.pack(">H", self.src_port)
data += struct.pack(">H", self.dst_port)
data += struct.pack(">H", self.payload_length)
data += struct.pack(">H", self.checksum)
return data
@classmethod
def from_bytes(cls, data):
src_port = struct.unpack(">H", data.read(2))[0]
dst_port = struct.unpack(">H", data.read(2))[0]
payload_length = struct.unpack(">H", data.read(2))[0]
checksum = struct.unpack(">H", data.read(2))[0]
return cls(src_port, dst_port, payload_length, checksum)
def __len__(self):
return self._header_length
class UDPDatagram(UpperLayerProtocol):
""" Class representing UDP datagram.
UDP is an upper layer protocol for IPv6 so it can be passed to IPv6 packet as upper_layer_protocol.
This class consists of a UDP header and payload. The example below shows how a UDP datagram can be constructed.
Example:
udp_dgram = UDPDatagram(UDPHeader(src_port=19788, dst_port=19788),
bytes([0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x09, 0x01, 0x01, 0x0b, 0x03,
0x04, 0xc6, 0x69, 0x73, 0x51, 0x0e, 0x01, 0x80,
0x12, 0x02, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef]))
"""
@property
def type(self):
return 17
def __init__(self, header, payload):
super(UDPDatagram, self).__init__(header)
self.payload = payload
def to_bytes(self):
self.header.payload_length = len(self.payload)
data = bytearray()
data += self.header.to_bytes()
data += self.payload.to_bytes()
return data
def __len__(self):
return len(self.header) + len(self.payload)
class ICMPv6Header(ConvertibleToBytes, BuildableFromBytes):
""" Class representing ICMPv6 message header.
This header is required to construct ICMPv6 message.
"""
_header_length = 4
def __init__(self, _type, code, checksum=0):
self.type = _type
self.code = code
self.checksum = checksum
def to_bytes(self):
return bytearray([self.type, self.code]) + struct.pack(">H", self.checksum)
@classmethod
def from_bytes(cls, data):
_type = ord(data.read(1))
code = ord(data.read(1))
checksum = struct.unpack(">H", data.read(2))[0]
return cls(_type, code, checksum)
def __len__(self):
return self._header_length
class ICMPv6(UpperLayerProtocol):
""" Class representing ICMPv6 message.
ICMPv6 is an upper layer protocol for IPv6 so it can be passed to IPv6 packet as upper_layer_protocol.
This class consists of an ICMPv6 header and body. The example below shows how an ICMPv6 message can be constructed.
Example:
icmpv6_msg = ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41])))
"""
@property
def type(self):
return 58
def __init__(self, header, body):
super(ICMPv6, self).__init__(header)
self.body = body
def to_bytes(self):
return bytearray(self.header.to_bytes() + self.body.to_bytes())
def __len__(self):
return len(self.header) + len(self.body)
class HopByHop(ExtensionHeader):
""" Class representing HopByHop extension header.
HopByHop extension header consists of:
- next_header type
- extension header length which is multiple of 8
- options
"""
_one_byte_padding = 0x00
_many_bytes_padding = 0x01
@property
def type(self):
return 0
def __init__(self, next_header=None, options=None, hdr_ext_len=None):
super(HopByHop, self).__init__(next_header, hdr_ext_len)
self.options = options if options is not None else []
if hdr_ext_len is not None:
self.hdr_ext_len = hdr_ext_len
else:
payload_length = self._calculate_payload_length()
self.hdr_ext_len = self._calculate_hdr_ext_len(payload_length)
def _calculate_payload_length(self):
payload_length = 2
for option in self.options:
payload_length += len(option)
return payload_length
def _calculate_hdr_ext_len(self, payload_length):
count = payload_length >> 3
if (payload_length & 0x7) == 0 and count > 0:
return count - 1
return count
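    # Worked example: a single 6-byte option gives a payload of 2 + 6 = 8 bytes, so
    # hdr_ext_len is 0 and the header occupies (0 + 1) * 8 = 8 bytes; an 8-byte option
    # gives a 10-byte payload, hdr_ext_len 1, and a 16-byte header once to_bytes()
    # appends padding.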
def to_bytes(self):
data = bytearray([self.next_header, self.hdr_ext_len])
for option in self.options:
data += option.to_bytes()
# Padding
#
# More details:
# https://tools.ietf.org/html/rfc2460#section-4.2
#
excess_bytes = len(data) & 0x7
if excess_bytes > 0:
padding_length = 8 - excess_bytes
if padding_length == 1:
data += bytearray([self._one_byte_padding])
else:
padding_length -= 2
data += bytearray([self._many_bytes_padding, padding_length])
data += bytearray([0x00 for _ in range(padding_length)])
return data
def __len__(self):
""" HopByHop extension header length
More details:
https://tools.ietf.org/html/rfc2460#section-4.3
"""
return (self.hdr_ext_len + 1) * 8
class HopByHopOptionHeader(ConvertibleToBytes, BuildableFromBytes):
""" Class representing HopByHop option header. """
_header_length = 2
def __init__(self, _type, length=None):
self.type = _type
self.length = length if length is not None else 0
def to_bytes(self):
return bytearray([self.type, self.length])
@classmethod
def from_bytes(cls, data):
_type = ord(data.read(1))
length = ord(data.read(1))
return cls(_type, length)
def __len__(self):
return self._header_length
def __repr__(self):
return "HopByHopOptionHeader(type={}, length={})".format(self.type, self.length)
class HopByHopOption(ConvertibleToBytes):
""" Class representing HopByHop option.
Class consists of two elements: HopByHopOptionHeader and value (e.g. for MPLOption).
The following example shows how any HopByHop option can be constructed.
Example:
HopByHop(next_header=0x3a,
options=[HopByHopOption(HopByHopOptionHeader(_type=0x6d),
                                         MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))])
"""
def __init__(self, header, value):
self.value = value
self.header = header
self.header.length = len(self.value)
def to_bytes(self):
return self.header.to_bytes() + self.value.to_bytes()
def __len__(self):
return len(self.header) + len(self.value)
def __repr__(self):
return "HopByHopOption(header={}, value={})".format(self.header, self.value)
class MPLOption(ConvertibleToBytes):
""" Class representing MPL option. """
_header_length = 2
_seed_id_length = {
0: 0,
1: 2,
2: 8,
3: 16
}
def __init__(self, S, M, V, sequence, seed_id):
self.S = S
self.M = M
self.V = V
self.sequence = sequence
self.seed_id = seed_id
def to_bytes(self):
smv = ((self.S & 0x03) << 6) | ((self.M & 0x01) << 5) | ((self.V & 0x01) << 4)
return bytearray([smv, self.sequence]) + self.seed_id
@classmethod
def from_bytes(cls, data):
b = ord(data.read(1))
s = ((b >> 6) & 0x03)
m = ((b >> 5) & 0x01)
v = ((b >> 4) & 0x01)
sequence = ord(data.read(1))
seed_id = data.read(cls._seed_id_length[s])
return cls(s, m, v, sequence, seed_id)
def __len__(self):
return self._header_length + self._seed_id_length[self.S]
def __repr__(self):
return "MPLOption(S={}, M={}, V={}, sequence={}, seed_id={})".format(self.S, self.M, self.V, self.sequence, hexlify(self.seed_id))
class IPv6PacketFactory(PacketFactory):
""" Factory that produces IPv6 packets from data.
This factory must be initialized with factories which allow to parse extension headers and upper layer protocols.
The following example shows preferable setup of IPv6PacketFactory.
Header types:
0: HopByHop
17: UDP
58: ICMPv6
Option types:
109: MPL
ICMPv6 body types:
128: Echo request
129: Echo response
Example usage:
ipv6_factory = IPv6PacketFactory(
ehf={
0: HopByHopFactory(options_factories={
109: MPLOptionFactory()
})
},
ulpf={
17: UDPDatagramFactory(dst_port_factories={
19788: MLEMessageFactory(),
19789: CoAPMessageFactory()
}),
58: ICMPv6Factory(body_factories={
128: ICMPv6EchoBodyFactory(),
129: ICMPv6EchoBodyFactory()
})
}
)
"""
def __init__(self, ehf=None, ulpf=None):
"""
ehf - Extension Header Factory
ulpf - Upper Layer Protocol Factory
Args:
ehf(dict[int: PacketFactory]): Dictionary mapping extension header types on specialized factories.
ulpf(dict[int: PacketFactory]): Dictionary mapping upper layer protocol types on specialized factories.
"""
self._ehf = ehf if ehf is not None else {}
self._ulpf = ulpf if ulpf is not None else {}
def _is_extension_header(self, header_type):
        return header_type not in UPPER_LAYER_PROTOCOLS
def _get_extension_header_factory_for(self, next_header):
try:
return self._ehf[next_header]
except KeyError:
raise RuntimeError("Could not get Extension Header factory for next_header={}.".format(next_header))
def _get_upper_layer_protocol_factory_for(self, next_header):
try:
return self._ulpf[next_header]
except KeyError:
raise RuntimeError("Could not get Upper Layer Protocol factory for next_header={}.".format(next_header))
def _parse_extension_headers(self, data, next_header, message_info):
extension_headers = []
while self._is_extension_header(next_header):
factory = self._get_extension_header_factory_for(next_header)
extension_header = factory.parse(data, message_info)
next_header = extension_header.next_header
extension_headers.append(extension_header)
return next_header, extension_headers
def _parse_upper_layer_protocol(self, data, next_header, message_info):
factory = self._get_upper_layer_protocol_factory_for(next_header)
return factory.parse(data, message_info)
def parse(self, data, message_info):
ipv6_header = IPv6Header.from_bytes(data)
message_info.source_ipv6 = ipv6_header.source_address
message_info.destination_ipv6 = ipv6_header.destination_address
next_header, extension_headers = self._parse_extension_headers(data, ipv6_header.next_header, message_info)
upper_layer_protocol = self._parse_upper_layer_protocol(data, next_header, message_info)
return IPv6Packet(ipv6_header, upper_layer_protocol, extension_headers)
class HopByHopOptionsFactory(object):
""" Factory that produces HopByHop options. """
_one_byte_padding = 0x00
_many_bytes_padding = 0x01
def __init__(self, options_factories=None):
self._options_factories = options_factories if options_factories is not None else {}
def _get_HopByHopOption_value_factory(self, _type):
try:
return self._options_factories[_type]
except KeyError:
raise RuntimeError("Could not find HopByHopOption value factory for type={}.".format(_type))
def parse(self, data, message_info):
options = []
while data.tell() < len(data.getvalue()):
option_header = HopByHopOptionHeader.from_bytes(data)
if option_header.type == self._one_byte_padding:
# skip one byte padding
data.read(1)
elif option_header.type == self._many_bytes_padding:
# skip n bytes padding
data.read(option_header.length)
else:
factory = self._get_HopByHopOption_value_factory(option_header.type)
option_data = data.read(option_header.length)
option = HopByHopOption(option_header, factory.parse(io.BytesIO(option_data), message_info))
options.append(option)
return options
class HopByHopFactory(PacketFactory):
""" Factory that produces HopByHop extension headers from data. """
def __init__(self, hop_by_hop_options_factory):
self._hop_by_hop_options_factory = hop_by_hop_options_factory
def _calculate_extension_header_length(self, hdr_ext_len):
return (hdr_ext_len + 1) * 8
def parse(self, data, message_info):
next_header = ord(data.read(1))
hdr_ext_len = ord(data.read(1))
        # Note! Two bytes were read (next_header and hdr_ext_len) so they must be subtracted from header length
hop_by_hop_length = self._calculate_extension_header_length(hdr_ext_len) - 2
hop_by_hop_data = data.read(hop_by_hop_length)
options = self._hop_by_hop_options_factory.parse(io.BytesIO(hop_by_hop_data), message_info)
hop_by_hop = HopByHop(next_header, options, hdr_ext_len)
message_info.payload_length += len(hop_by_hop)
return hop_by_hop
class MPLOptionFactory(PacketFactory):
""" Factory that produces MPL options for HopByHop extension header. """
def parse(self, data, message_info):
return MPLOption.from_bytes(data)
class UDPHeaderFactory:
""" Factory that produces UDP header. """
def parse(self, data, message_info):
return UDPHeader.from_bytes(data)
class UdpBasedOnSrcDstPortsPayloadFactory:
# TODO: Unittests
""" Factory that produces UDP payload. """
def __init__(self, src_dst_port_based_payload_factories):
"""
Args:
            src_dst_port_based_payload_factories (PacketFactory): Factories that parse the UDP payload based on the source or destination port.
"""
self._factories = src_dst_port_based_payload_factories
def parse(self, data, message_info):
factory = None
if message_info.dst_port in self._factories:
factory = self._factories[message_info.dst_port]
if message_info.src_port in self._factories:
factory = self._factories[message_info.src_port]
if factory is None:
raise RuntimeError("Could not find factory to build UDP payload.")
return factory.parse(data, message_info)
class UDPDatagramFactory(PacketFactory):
# TODO: Unittests
""" Factory that produces UDP datagrams. """
def __init__(self, udp_header_factory, udp_payload_factory):
self._udp_header_factory = udp_header_factory
self._udp_payload_factory = udp_payload_factory
def parse(self, data, message_info):
header = self._udp_header_factory.parse(data, message_info)
# Update message payload length: UDP header (8B) + payload length
message_info.payload_length += len(header) + (len(data.getvalue()) - data.tell())
message_info.src_port = header.src_port
message_info.dst_port = header.dst_port
payload = self._udp_payload_factory.parse(data, message_info)
return UDPDatagram(header, payload)
class ICMPv6Factory(PacketFactory):
""" Factory that produces ICMPv6 messages from data. """
def __init__(self, body_factories=None):
self._body_factories = body_factories if body_factories is not None else {}
def _get_icmpv6_body_factory(self, _type):
try:
return self._body_factories[_type]
except KeyError:
if "default" not in self._body_factories:
raise RuntimeError("Could not find specialized factory to parse ICMP body. "
"Unsupported ICMP type: {}".format(_type))
default_factory = self._body_factories["default"]
print("Could not find specialized factory to parse ICMP body. "
"Take the default one: {}".format(type(default_factory)))
return default_factory
def parse(self, data, message_info):
header = ICMPv6Header.from_bytes(data)
factory = self._get_icmpv6_body_factory(header.type)
message_info.payload_length += len(header) + (len(data.getvalue()) - data.tell())
return ICMPv6(header, factory.parse(data, message_info))
class ICMPv6EchoBodyFactory(PacketFactory):
""" Factory that produces ICMPv6 echo message body. """
def parse(self, data, message_info):
return ICMPv6EchoBody.from_bytes(data)
class BytesPayload(ConvertibleToBytes, BuildableFromBytes):
""" Class representing bytes payload. """
def __init__(self, data):
self.data = data
def to_bytes(self):
return bytearray(self.data)
@classmethod
def from_bytes(cls, data):
return cls(data)
def __len__(self):
return len(self.data)
class BytesPayloadFactory(PacketFactory):
""" Factory that produces bytes payload. """
def parse(self, data, message_info):
return BytesPayload(data.read())
class ICMPv6EchoBody(ConvertibleToBytes, BuildableFromBytes):
""" Class representing body of ICMPv6 echo messages. """
_header_length = 4
def __init__(self, identifier, sequence_number, data):
self.identifier = identifier
self.sequence_number = sequence_number
self.data = data
def to_bytes(self):
data = struct.pack(">H", self.identifier)
data += struct.pack(">H", self.sequence_number)
data += self.data
return data
@classmethod
def from_bytes(cls, data):
identifier = struct.unpack(">H", data.read(2))[0]
sequence_number = struct.unpack(">H", data.read(2))[0]
return cls(identifier, sequence_number, data.read())
def __len__(self):
return self._header_length + len(self.data)
class ICMPv6DestinationUnreachableFactory(PacketFactory):
""" Factory that produces ICMPv6 echo message body. """
def parse(self, data, message_info):
return ICMPv6DestinationUnreachable.from_bytes(data)
class ICMPv6DestinationUnreachable(ConvertibleToBytes, BuildableFromBytes):
""" Class representing body of ICMPv6 Destination Unreachable messages. """
_header_length = 4
_unused = 0
def __init__(self, data):
self.data = data
def to_bytes(self):
data = bytearray(struct.pack(">I", self._unused))
data += self.data
return data
@classmethod
def from_bytes(cls, data):
unused = struct.unpack(">I", data.read(4))[0]
if unused != 0:
raise RuntimeError(
"Invalid value of unused field in the ICMPv6 Destination Unreachable data. Expected value: 0.")
return cls(bytearray(data.read()))
def __len__(self):
return self._header_length + len(self.data)
| bsd-3-clause |
southpawtech/TACTIC-DEV | 3rd_party/CherryPy/cherrypy/process/plugins.py | 6 | 20583 | """Site services for use with a Web Site Process Bus."""
import os
import re
try:
set
except NameError:
from sets import Set as set
import signal as _signal
import sys
import time
import thread
import threading
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
By default, instantiating this object subscribes the following signals
and listeners:
TERM: bus.exit
HUP : bus.restart
USR1: bus.graceful
"""
# Map from signal numbers to names
signals = {}
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
self._previous_handlers = {}
def subscribe(self):
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
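# Hedged usage sketch (assumes `bus` is the Web Site Process Bus these plugins serve):
#     handler = SignalHandler(bus)
#     handler.subscribe()                            # install the TERM/HUP/USR1 defaults
#     handler.set_handler('SIGUSR2', bus.graceful)   # optional extra mapping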
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to Gavin Baker: http://antonym.org/node/100.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid, doc="The uid under which to run.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid, doc="The gid under which to run.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(_get_umask, _set_umask, doc="The umask under which to run.")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
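# Hedged usage sketch (the uid/gid/umask values are illustrative):
#     DropPrivileges(bus, uid='nobody', gid='nogroup', umask=022).subscribe()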
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via:
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
    indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
            self.bus.log('Already daemonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError, exc:
# Python raises OSError rather than returning negative numbers.
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError, exc:
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(str(pid))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
class PerpetualTimer(threading._Timer):
"""A subclass of threading._Timer whose run() method repeats."""
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception, x:
self.bus.log("Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread.
bus: a Web Site Process Bus object.
callback: the function to call at intervals.
frequency: the time in seconds between callback runs.
"""
frequency = 60
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own perpetual timer thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = PerpetualTimer(self.frequency, self.callback)
self.thread.bus = self.bus
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's perpetual timer thread."""
if self.thread is None:
self.bus.log("No thread running for %s." % self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's perpetual timer thread and restart it."""
self.stop()
self.start()
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change."""
frequency = 1
match = '.*'
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own perpetual timer thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of filenames which the Autoreloader will monitor."""
files = set()
for k, m in sys.modules.items():
if re.match(self.match, k):
if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
                    # ensure absolute paths so an os.chdir() in the app doesn't break me
f = os.path.normpath(os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." % filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." % self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('release_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = thread.get_ident()
if thread_ident not in self.threads:
# We can't just use _get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = threading._get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
| epl-1.0 |
smn/onadata | onadata/apps/main/tests/test_form_edit.py | 13 | 5646 | from django.core.urlresolvers import reverse
from onadata.apps.main.models import MetaData
from onadata.apps.main.views import edit
from onadata.apps.logger.models import XForm
from onadata.apps.logger.views import delete_xform
from test_base import TestBase
class TestFormEdit(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form_and_submit_instance()
self.edit_url = reverse(edit, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
def test_anon_no_edit_post(self):
self.xform.shared = True
self.xform.save()
desc = 'Snooky'
response = self.anon.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(
XForm.objects.get(pk=self.xform.pk).description, desc)
self.assertEqual(response.status_code, 302)
def test_not_owner_no_edit_post(self):
self.xform.shared = True
self.xform.save()
desc = 'Snooky'
self._create_user_and_login("jo")
response = self.client.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 403)
self.assertNotEqual(
XForm.objects.get(pk=self.xform.pk).description, desc)
def test_user_description_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).description, desc)
def test_user_title_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'title': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).title, desc)
def test_user_form_license_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'form-license': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(MetaData.form_license(self.xform).data_value, desc)
def test_user_data_license_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'data-license': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(MetaData.data_license(self.xform).data_value, desc)
def test_user_toggle_data_privacy(self):
self.assertEqual(self.xform.shared, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'data'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared_data, True)
def test_user_toggle_data_privacy_off(self):
self.xform.shared_data = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'data'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).shared_data, False)
def test_user_toggle_form_privacy(self):
self.assertEqual(self.xform.shared, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'form'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared, True)
def test_user_toggle_form_privacy_off(self):
self.xform.shared = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'form'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared, False)
def test_user_toggle_form_downloadable(self):
self.xform.downloadable = False
self.xform.save()
self.assertEqual(self.xform.downloadable, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'active'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).downloadable, True)
def test_user_toggle_form_downloadable_off(self):
self.xform.downloadable = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'active'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).downloadable, False)
def test_delete_404(self):
bad_delete_url = reverse(delete_xform, kwargs={
'username': self.user.username,
'id_string': 'non_existent_id_string'
})
response = self.client.post(bad_delete_url)
self.assertEqual(response.status_code, 404)
| bsd-2-clause |
kwrobert/heat-templates | tests/software_config/test_heat_config_kubelet.py | 7 | 4615 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import tempfile
import fixtures
from testtools import matchers
from tests.software_config import common
class HeatConfigKubeletORCTest(common.RunScriptTest):
fake_hooks = ['kubelet']
data = [{
"id": "abcdef001",
"group": "kubelet",
"name": "mysql",
"config": {
"version": "v1beta2",
"volumes": [{
"name": "mariadb-data"
}],
"containers": [{
"image": "mariadb_image",
"volumeMounts": [{
"mountPath": "/var/lib/mysql",
"name": "mariadb-data"
}],
"name": "mariadb",
"env": [{
"name": "DB_ROOT_PASSWORD",
"value": "mariadb_password"
}],
"ports": [{
"containerPort": 3306
}]
}]}
}, {
"id": "abcdef002",
"group": "kubelet",
"name": "rabbitmq",
"config": {
"version": "v1beta2",
"containers": [{
"image": "rabbitmq_image",
"name": "rabbitmq",
"ports": [{
"containerPort": 5672
}]
}]
}
}, {
"id": "abcdef003",
"group": "kubelet",
"name": "heat_api_engine",
"config": {
"version": "v1beta2",
"containers": [{
"image": "heat_engine_image",
"name": "heat-engine",
"env": [{
"name": "DB_ROOT_PASSWORD",
"value": "mariadb_password"
}, {
"name": "HEAT_DB_PASSWORD",
"value": "heatdb_password"
}, {
"name": "HEAT_KEYSTONE_PASSWORD",
"value": "password"
}]
}, {
"image": "heat_api_image",
"name": "heat-api",
"ports": [{
"containerPort": 8004
}]
}]
}
}]
def setUp(self):
super(HeatConfigKubeletORCTest, self).setUp()
self.fake_hook_path = self.relative_path(__file__, 'hook-fake.py')
self.heat_config_kubelet_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements',
'heat-config-kubelet/os-refresh-config/configure.d/'
'50-heat-config-kubelet')
self.manifests_dir = self.useFixture(fixtures.TempDir())
with open(self.fake_hook_path) as f:
fake_hook = f.read()
for hook in self.fake_hooks:
hook_name = self.manifests_dir.join(hook)
with open(hook_name, 'w') as f:
os.utime(hook_name, None)
f.write(fake_hook)
f.flush()
os.chmod(hook_name, 0o755)
def write_config_file(self, data):
config_file = tempfile.NamedTemporaryFile()
config_file.write(json.dumps(data))
config_file.flush()
return config_file
def test_run_heat_config(self):
with self.write_config_file(self.data) as config_file:
env = os.environ.copy()
env.update({
'HEAT_KUBELET_MANIFESTS': self.manifests_dir.join(),
'HEAT_SHELL_CONFIG': config_file.name
})
returncode, stdout, stderr = self.run_cmd(
[self.heat_config_kubelet_path], env)
self.assertEqual(0, returncode, stderr)
for config in self.data:
manifest_name = '%s.json' % config['id']
manifest_path = self.manifests_dir.join(manifest_name)
self.assertThat(manifest_path, matchers.FileExists())
# manifest file should match manifest config
self.assertEqual(config['config'],
self.json_from_file(manifest_path))
| apache-2.0 |
enlighter/ndl-question-papers-search-hub | qp_search_project/searcher/models.py | 1 | 3438 | from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import post_save
from localflavor.in_ import in_states
gettext_noop = lambda s: s
EDUCATIONAL_ROLE= (
('vb', gettext_noop('Till Class VIII')),
('mc', gettext_noop('Class IX to X')),
('ss', gettext_noop('Class XI to XII')),
('gr', gettext_noop('UG or PG')),
('cd', gettext_noop('Career Development or Technical Study')),
('ae', gettext_noop('Adult Education')),
('ll', gettext_noop('Lifelong Learner')),
)
LANGUAGES = (
('en', gettext_noop('English')),
('hi', gettext_noop('Hindi')),
('bn', gettext_noop('Bengali')),
)
class board(models.Model):
name = models.CharField(max_length=15)
#example : name = CBSE
def __str__(self):
return self.name
class exam(models.Model):
name = models.CharField(max_length=15)
# example : type = AISSCE
def __str__(self):
return self.name
class educational_institute(models.Model):
name = models.CharField(max_length=90, unique=True)
state = models.CharField(max_length=21, choices=in_states.STATE_CHOICES)
city = models.CharField(max_length=21)
def __str__(self):
return self.name
class student(AbstractUser):
#user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
state = models.CharField(max_length=21, null=True, blank=True, choices=in_states.STATE_CHOICES)
city = models.CharField(max_length=21, null=True, blank=True)
educational_role = models.CharField(max_length=39, choices=EDUCATIONAL_ROLE)
institute = models.ForeignKey(educational_institute, null=True, blank=True)
language = models.CharField(max_length=8, choices=LANGUAGES)
#is_staff = models.BooleanField(u'staff status', default=False,
# help_text=u'Designates whether the user can log into this admin '
# 'site.')
#is_active = models.BooleanField(u'active', default=True,
# help_text=u'Designates whether this user should be treated as '
# 'active. Unselect this instead of deleting accounts.')
REQUIRED_FIELDS = ['email', 'educational_role', 'language']
objects = UserManager()
def __str__(self):
return str(self.username)
#@receiver(post_save, sender=User)
#def create_profile(sender, **kwargs):
# user = kwargs["instance"]
# if kwargs["created"]:
# user_profile = student(user=user)
# user_profile.save()
#post_save.connect(create_profile, sender=User)
# Create student instance on access - very useful if you plan to always have a Student obj associated with a User object anyway
#User.student = property(lambda u: student.objects.get_or_create(user=u)[0])
class search_result(models.Model):
year_month = models.DateField()
type = models.ForeignKey(exam, on_delete=models.CASCADE)
source = models.ForeignKey(board, null=True, blank=True)
subject = models.CharField(max_length=45)
location = models.URLField(max_length=120)
def get_year(self):
return self.year_month.year
def get_month(self):
return self.year_month.month
def __str__(self):
return str(self.get_year()) + str(self.type) + self.subject | mit |
shaulkf/bitcoin | qa/rpc-tests/proxy_test.py | 93 | 7769 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
])
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| mit |
nguyentruongtho/buck | programs/subprocutils.py | 5 | 3349 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
#
# an almost exact copy of the shutil.which() implementation from python3.4
#
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to
# the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def propagate_failure(status):
"""
Propagate the failure mode of a subprocess to the current process.
"""
    # The subprocess died via a signal, so re-raise it.
if status < 0:
os.kill(os.getpid(), -status)
# The subprocess died with an error code, propagate it.
if status > 0:
sys.exit(status)
| apache-2.0 |
ppries/tensorflow | tensorflow/contrib/distributions/python/ops/transformed_distribution.py | 1 | 9906 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.ops import math_ops
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class TransformedDistribution(distributions.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
Shapes, type, and reparameterization are taken from the base distribution.
  Write `P(Y=y)` for the cumulative distribution function of random variable
  (rv) `Y` and `p` for its derivative with respect to `Y`. Assume that `Y=g(X)` where `g` is
continuous and `X=g^{-1}(Y)`. Write `J` for the Jacobian (of some function).
A `TransformedDistribution` alters the input/outputs of a `Distribution`
associated with rv `X` in the following ways:
* `sample`:
Mathematically:
```none
Y = g(X)
```
Programmatically:
```python
return bijector.forward(distribution.sample(...))
```
* `log_prob`:
Mathematically:
```none
(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)
```
Programmatically:
```python
return (bijector.inverse_log_det_jacobian(x) +
          distribution.log_prob(bijector.inverse(x)))
```
* `log_cdf`:
Mathematically:
```none
(log o P o g^{-1})(y)
```
Programmatically:
```python
  return distribution.log_cdf(bijector.inverse(y))
```
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1))),
name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=0, sigma=1),
bijector=ds.bijector.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0),
name="NormalTransformedDistribution")
```
"""
def __init__(self,
distribution,
bijector,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for the distribution. Default:
`bijector.name + distribution.name`.
"""
parameters = locals()
parameters.pop("self")
name = name or bijector.name + distribution.name
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
is_continuous=self._distribution.is_continuous,
is_reparameterized=self._distribution.is_reparameterized,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape(self):
return self.bijector.forward_event_shape(
self.distribution.event_shape())
def _get_event_shape(self):
return self.bijector.get_forward_event_shape(
self.distribution.get_event_shape())
def _batch_shape(self):
return self.distribution.batch_shape()
def _get_batch_shape(self):
return self.distribution.get_batch_shape()
@distribution_util.AppendDocstring(
"""Samples from the base distribution and then passes through
the bijector's forward transform.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=n, seed=seed,
**distribution_kwargs)
# Recall that a bijector is named for its forward transform, i.e.,
# `Y = g(X)`,
return self.bijector.forward(x, **bijector_kwargs)
@distribution_util.AppendDocstring(
"""Implements `(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)`,
where `g^{-1}` is the inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return ildj + self.distribution.log_prob(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
"""Implements `p(g^{-1}(y)) det|J(g^{-1}(y))|`, where `g^{-1}` is the
inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return math_ops.exp(ildj) * self.distribution.prob(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
| apache-2.0 |
adw0rd/lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/fi/fi_municipalities.py | 394 | 10822 | # -*- coding: utf-8 -*-
"""
An alphabetical list of Finnish municipalities for use as `choices` in a
formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
MUNICIPALITY_CHOICES = (
('akaa', u"Akaa"),
('alajarvi', u"Alajärvi"),
('alavieska', u"Alavieska"),
('alavus', u"Alavus"),
('artjarvi', u"Artjärvi"),
('asikkala', u"Asikkala"),
('askola', u"Askola"),
('aura', u"Aura"),
('brando', u"Brändö"),
('eckero', u"Eckerö"),
('enonkoski', u"Enonkoski"),
('enontekio', u"Enontekiö"),
('espoo', u"Espoo"),
('eura', u"Eura"),
('eurajoki', u"Eurajoki"),
('evijarvi', u"Evijärvi"),
('finstrom', u"Finström"),
('forssa', u"Forssa"),
('foglo', u"Föglö"),
('geta', u"Geta"),
('haapajarvi', u"Haapajärvi"),
('haapavesi', u"Haapavesi"),
('hailuoto', u"Hailuoto"),
('halsua', u"Halsua"),
('hamina', u"Hamina"),
('hammarland', u"Hammarland"),
('hankasalmi', u"Hankasalmi"),
('hanko', u"Hanko"),
('harjavalta', u"Harjavalta"),
('hartola', u"Hartola"),
('hattula', u"Hattula"),
('haukipudas', u"Haukipudas"),
('hausjarvi', u"Hausjärvi"),
('heinola', u"Heinola"),
('heinavesi', u"Heinävesi"),
('helsinki', u"Helsinki"),
('hirvensalmi', u"Hirvensalmi"),
('hollola', u"Hollola"),
('honkajoki', u"Honkajoki"),
('huittinen', u"Huittinen"),
('humppila', u"Humppila"),
('hyrynsalmi', u"Hyrynsalmi"),
('hyvinkaa', u"Hyvinkää"),
('hameenkoski', u"Hämeenkoski"),
('hameenkyro', u"Hämeenkyrö"),
('hameenlinna', u"Hämeenlinna"),
('ii', u"Ii"),
('iisalmi', u"Iisalmi"),
('iitti', u"Iitti"),
('ikaalinen', u"Ikaalinen"),
('ilmajoki', u"Ilmajoki"),
('ilomantsi', u"Ilomantsi"),
('imatra', u"Imatra"),
('inari', u"Inari"),
('inkoo', u"Inkoo"),
('isojoki', u"Isojoki"),
('isokyro', u"Isokyrö"),
('jalasjarvi', u"Jalasjärvi"),
('janakkala', u"Janakkala"),
('joensuu', u"Joensuu"),
('jokioinen', u"Jokioinen"),
('jomala', u"Jomala"),
('joroinen', u"Joroinen"),
('joutsa', u"Joutsa"),
('juankoski', u"Juankoski"),
('juuka', u"Juuka"),
('juupajoki', u"Juupajoki"),
('juva', u"Juva"),
('jyvaskyla', u"Jyväskylä"),
('jamijarvi', u"Jämijärvi"),
('jamsa', u"Jämsä"),
('jarvenpaa', u"Järvenpää"),
('kaarina', u"Kaarina"),
('kaavi', u"Kaavi"),
('kajaani', u"Kajaani"),
('kalajoki', u"Kalajoki"),
('kangasala', u"Kangasala"),
('kangasniemi', u"Kangasniemi"),
('kankaanpaa', u"Kankaanpää"),
('kannonkoski', u"Kannonkoski"),
('kannus', u"Kannus"),
('karijoki', u"Karijoki"),
('karjalohja', u"Karjalohja"),
('karkkila', u"Karkkila"),
('karstula', u"Karstula"),
('karttula', u"Karttula"),
('karvia', u"Karvia"),
('kaskinen', u"Kaskinen"),
('kauhajoki', u"Kauhajoki"),
('kauhava', u"Kauhava"),
('kauniainen', u"Kauniainen"),
('kaustinen', u"Kaustinen"),
('keitele', u"Keitele"),
('kemi', u"Kemi"),
('kemijarvi', u"Kemijärvi"),
('keminmaa', u"Keminmaa"),
('kemionsaari', u"Kemiönsaari"),
('kempele', u"Kempele"),
('kerava', u"Kerava"),
('kerimaki', u"Kerimäki"),
('kesalahti', u"Kesälahti"),
('keuruu', u"Keuruu"),
('kihnio', u"Kihniö"),
('kiikoinen', u"Kiikoinen"),
('kiiminki', u"Kiiminki"),
('kinnula', u"Kinnula"),
('kirkkonummi', u"Kirkkonummi"),
('kitee', u"Kitee"),
('kittila', u"Kittilä"),
('kiuruvesi', u"Kiuruvesi"),
('kivijarvi', u"Kivijärvi"),
('kokemaki', u"Kokemäki"),
('kokkola', u"Kokkola"),
('kolari', u"Kolari"),
('konnevesi', u"Konnevesi"),
('kontiolahti', u"Kontiolahti"),
('korsnas', u"Korsnäs"),
('koskitl', u"Koski Tl"),
('kotka', u"Kotka"),
('kouvola', u"Kouvola"),
('kristiinankaupunki', u"Kristiinankaupunki"),
('kruunupyy', u"Kruunupyy"),
('kuhmalahti', u"Kuhmalahti"),
('kuhmo', u"Kuhmo"),
('kuhmoinen', u"Kuhmoinen"),
('kumlinge', u"Kumlinge"),
('kuopio', u"Kuopio"),
('kuortane', u"Kuortane"),
('kurikka', u"Kurikka"),
('kustavi', u"Kustavi"),
('kuusamo', u"Kuusamo"),
('kylmakoski', u"Kylmäkoski"),
('kyyjarvi', u"Kyyjärvi"),
('karkola', u"Kärkölä"),
('karsamaki', u"Kärsämäki"),
('kokar', u"Kökar"),
('koylio', u"Köyliö"),
('lahti', u"Lahti"),
('laihia', u"Laihia"),
('laitila', u"Laitila"),
('lapinjarvi', u"Lapinjärvi"),
('lapinlahti', u"Lapinlahti"),
('lappajarvi', u"Lappajärvi"),
('lappeenranta', u"Lappeenranta"),
('lapua', u"Lapua"),
('laukaa', u"Laukaa"),
('lavia', u"Lavia"),
('lemi', u"Lemi"),
('lemland', u"Lemland"),
('lempaala', u"Lempäälä"),
('leppavirta', u"Leppävirta"),
('lestijarvi', u"Lestijärvi"),
('lieksa', u"Lieksa"),
('lieto', u"Lieto"),
('liminka', u"Liminka"),
('liperi', u"Liperi"),
('lohja', u"Lohja"),
('loimaa', u"Loimaa"),
('loppi', u"Loppi"),
('loviisa', u"Loviisa"),
('luhanka', u"Luhanka"),
('lumijoki', u"Lumijoki"),
('lumparland', u"Lumparland"),
('luoto', u"Luoto"),
('luumaki', u"Luumäki"),
('luvia', u"Luvia"),
('lansi-turunmaa', u"Länsi-Turunmaa"),
('maalahti', u"Maalahti"),
('maaninka', u"Maaninka"),
('maarianhamina', u"Maarianhamina"),
('marttila', u"Marttila"),
('masku', u"Masku"),
('merijarvi', u"Merijärvi"),
('merikarvia', u"Merikarvia"),
('miehikkala', u"Miehikkälä"),
('mikkeli', u"Mikkeli"),
('muhos', u"Muhos"),
('multia', u"Multia"),
('muonio', u"Muonio"),
('mustasaari', u"Mustasaari"),
('muurame', u"Muurame"),
('mynamaki', u"Mynämäki"),
('myrskyla', u"Myrskylä"),
('mantsala', u"Mäntsälä"),
('mantta-vilppula', u"Mänttä-Vilppula"),
('mantyharju', u"Mäntyharju"),
('naantali', u"Naantali"),
('nakkila', u"Nakkila"),
('nastola', u"Nastola"),
('nilsia', u"Nilsiä"),
('nivala', u"Nivala"),
('nokia', u"Nokia"),
('nousiainen', u"Nousiainen"),
('nummi-pusula', u"Nummi-Pusula"),
('nurmes', u"Nurmes"),
('nurmijarvi', u"Nurmijärvi"),
('narpio', u"Närpiö"),
('oravainen', u"Oravainen"),
('orimattila', u"Orimattila"),
('oripaa', u"Oripää"),
('orivesi', u"Orivesi"),
('oulainen', u"Oulainen"),
('oulu', u"Oulu"),
('oulunsalo', u"Oulunsalo"),
('outokumpu', u"Outokumpu"),
('padasjoki', u"Padasjoki"),
('paimio', u"Paimio"),
('paltamo', u"Paltamo"),
('parikkala', u"Parikkala"),
('parkano', u"Parkano"),
('pedersore', u"Pedersöre"),
('pelkosenniemi', u"Pelkosenniemi"),
('pello', u"Pello"),
('perho', u"Perho"),
('pertunmaa', u"Pertunmaa"),
('petajavesi', u"Petäjävesi"),
('pieksamaki', u"Pieksämäki"),
('pielavesi', u"Pielavesi"),
('pietarsaari', u"Pietarsaari"),
('pihtipudas', u"Pihtipudas"),
('pirkkala', u"Pirkkala"),
('polvijarvi', u"Polvijärvi"),
('pomarkku', u"Pomarkku"),
('pori', u"Pori"),
('pornainen', u"Pornainen"),
('porvoo', u"Porvoo"),
('posio', u"Posio"),
('pudasjarvi', u"Pudasjärvi"),
('pukkila', u"Pukkila"),
('punkaharju', u"Punkaharju"),
('punkalaidun', u"Punkalaidun"),
('puolanka', u"Puolanka"),
('puumala', u"Puumala"),
('pyhtaa', u"Pyhtää"),
('pyhajoki', u"Pyhäjoki"),
('pyhajarvi', u"Pyhäjärvi"),
('pyhanta', u"Pyhäntä"),
('pyharanta', u"Pyhäranta"),
('palkane', u"Pälkäne"),
('poytya', u"Pöytyä"),
('raahe', u"Raahe"),
('raasepori', u"Raasepori"),
('raisio', u"Raisio"),
('rantasalmi', u"Rantasalmi"),
('ranua', u"Ranua"),
('rauma', u"Rauma"),
('rautalampi', u"Rautalampi"),
('rautavaara', u"Rautavaara"),
('rautjarvi', u"Rautjärvi"),
('reisjarvi', u"Reisjärvi"),
('riihimaki', u"Riihimäki"),
('ristiina', u"Ristiina"),
('ristijarvi', u"Ristijärvi"),
('rovaniemi', u"Rovaniemi"),
('ruokolahti', u"Ruokolahti"),
('ruovesi', u"Ruovesi"),
('rusko', u"Rusko"),
('raakkyla', u"Rääkkylä"),
('saarijarvi', u"Saarijärvi"),
('salla', u"Salla"),
('salo', u"Salo"),
('saltvik', u"Saltvik"),
('sastamala', u"Sastamala"),
('sauvo', u"Sauvo"),
('savitaipale', u"Savitaipale"),
('savonlinna', u"Savonlinna"),
('savukoski', u"Savukoski"),
('seinajoki', u"Seinäjoki"),
('sievi', u"Sievi"),
('siikainen', u"Siikainen"),
('siikajoki', u"Siikajoki"),
('siikalatva', u"Siikalatva"),
('siilinjarvi', u"Siilinjärvi"),
('simo', u"Simo"),
('sipoo', u"Sipoo"),
('siuntio', u"Siuntio"),
('sodankyla', u"Sodankylä"),
('soini', u"Soini"),
('somero', u"Somero"),
('sonkajarvi', u"Sonkajärvi"),
('sotkamo', u"Sotkamo"),
('sottunga', u"Sottunga"),
('sulkava', u"Sulkava"),
('sund', u"Sund"),
('suomenniemi', u"Suomenniemi"),
('suomussalmi', u"Suomussalmi"),
('suonenjoki', u"Suonenjoki"),
('sysma', u"Sysmä"),
('sakyla', u"Säkylä"),
('taipalsaari', u"Taipalsaari"),
('taivalkoski', u"Taivalkoski"),
('taivassalo', u"Taivassalo"),
('tammela', u"Tammela"),
('tampere', u"Tampere"),
('tarvasjoki', u"Tarvasjoki"),
('tervo', u"Tervo"),
('tervola', u"Tervola"),
('teuva', u"Teuva"),
('tohmajarvi', u"Tohmajärvi"),
('toholampi', u"Toholampi"),
('toivakka', u"Toivakka"),
('tornio', u"Tornio"),
('turku', u"Turku"),
('tuusniemi', u"Tuusniemi"),
('tuusula', u"Tuusula"),
('tyrnava', u"Tyrnävä"),
('toysa', u"Töysä"),
('ulvila', u"Ulvila"),
('urjala', u"Urjala"),
('utajarvi', u"Utajärvi"),
('utsjoki', u"Utsjoki"),
('uurainen', u"Uurainen"),
('uusikaarlepyy', u"Uusikaarlepyy"),
('uusikaupunki', u"Uusikaupunki"),
('vaala', u"Vaala"),
('vaasa', u"Vaasa"),
('valkeakoski', u"Valkeakoski"),
('valtimo', u"Valtimo"),
('vantaa', u"Vantaa"),
('varkaus', u"Varkaus"),
('varpaisjarvi', u"Varpaisjärvi"),
('vehmaa', u"Vehmaa"),
('vesanto', u"Vesanto"),
('vesilahti', u"Vesilahti"),
('veteli', u"Veteli"),
('vierema', u"Vieremä"),
('vihanti', u"Vihanti"),
('vihti', u"Vihti"),
('viitasaari', u"Viitasaari"),
('vimpeli', u"Vimpeli"),
('virolahti', u"Virolahti"),
('virrat', u"Virrat"),
('vardo', u"Vårdö"),
('vahakyro', u"Vähäkyrö"),
('voyri-maksamaa', u"Vöyri-Maksamaa"),
('yli-ii', u"Yli-Ii"),
('ylitornio', u"Ylitornio"),
('ylivieska', u"Ylivieska"),
('ylojarvi', u"Ylöjärvi"),
('ypaja', u"Ypäjä"),
('ahtari', u"Ähtäri"),
('aanekoski', u"Äänekoski")
) | gpl-3.0 |
cmbruns/vr_samples | test/python/test_sphere.py | 1 | 2362 | #!/bin/env python
import unittest
import glfw
from OpenGL import GL
from PIL import Image
import numpy
from vrprim.imposter import sphere
def images_are_identical(img1, img2):
ar1 = numpy.array(img1.convert('RGBA'))
ar2 = numpy.array(img2.convert('RGBA'))
return numpy.array_equiv(ar1, ar2)
class TestGLRendering(unittest.TestCase):
def setUp(self):
if not glfw.init():
raise Exception("GLFW Initialization error")
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 1)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.VISIBLE, False) # Hidden window is next best thing to offscreen
window = glfw.create_window(16, 16, "Little test window", None, None)
if window is None:
glfw.terminate()
raise Exception("GLFW window creation error")
glfw.make_context_current(window)
with open('../images/red16x16.png', 'rb') as fh:
self.red_image = Image.open(fh)
self.red_image.load()
def tearDown(self):
glfw.terminate()
def test_sphere_imposter(self):
GL.glClearColor(1, 0, 0, 1) # red
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
s = sphere.SphereActor()
s.init_gl()
s.display_gl(None)
s.dispose_gl()
# Save render as image
GL.glFlush()
data = GL.glReadPixels(0, 0, 16, 16, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE)
observed = Image.frombytes('RGBA', (16, 16), data)
observed.save("test.png")
self.assertFalse(images_are_identical(observed, self.red_image))
def test_red_render(self):
'Test minimal screen clear in OpenGL'
# Color the entire display red
GL.glClearColor(1, 0, 0, 1) # red
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
# Save render as image
GL.glFlush()
data = GL.glReadPixels(0, 0, 16, 16, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE)
observed = Image.frombytes('RGBA', (16, 16), data)
expected = self.red_image
self.assertTrue(images_are_identical(observed, expected))
print ("Red GL test completed")
if __name__ == '__main__':
unittest.main()
| mit |
pdee/pdee | python-libs/rope/base/project.py | 40 | 11976 | import cPickle as pickle
import os
import shutil
import sys
import warnings
import rope.base.fscommands
from rope.base import exceptions, taskhandle, prefs, history, pycore, utils
from rope.base.resourceobserver import *
from rope.base.resources import File, Folder, _ResourceMatcher
class _Project(object):
def __init__(self, fscommands):
self.observers = []
self.fscommands = fscommands
self.prefs = prefs.Prefs()
self.data_files = _DataFiles(self)
def get_resource(self, resource_name):
"""Get a resource in a project.
`resource_name` is the path of a resource in a project. It is
the path of a resource relative to project root. Project root
folder address is an empty string. If the resource does not
exist a `exceptions.ResourceNotFound` exception would be
raised. Use `get_file()` and `get_folder()` when you need to
get nonexistent `Resource`\s.
"""
path = self._get_resource_path(resource_name)
if not os.path.exists(path):
raise exceptions.ResourceNotFoundError(
'Resource <%s> does not exist' % resource_name)
elif os.path.isfile(path):
return File(self, resource_name)
elif os.path.isdir(path):
return Folder(self, resource_name)
else:
raise exceptions.ResourceNotFoundError('Unknown resource '
+ resource_name)
def validate(self, folder):
"""Validate files and folders contained in this folder
It validates all of the files and folders contained in this
folder if some observers are interested in them.
"""
for observer in list(self.observers):
observer.validate(folder)
def add_observer(self, observer):
"""Register a `ResourceObserver`
See `FilteredResourceObserver`.
"""
self.observers.append(observer)
def remove_observer(self, observer):
"""Remove a registered `ResourceObserver`"""
if observer in self.observers:
self.observers.remove(observer)
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
"""Apply the changes in a `ChangeSet`
Most of the time you call this function for committing the
changes for a refactoring.
"""
self.history.do(changes, task_handle=task_handle)
def get_pycore(self):
return self.pycore
def get_file(self, path):
"""Get the file with `path` (it may not exist)"""
return File(self, path)
def get_folder(self, path):
"""Get the folder with `path` (it may not exist)"""
return Folder(self, path)
def is_ignored(self, resource):
return False
def get_prefs(self):
return self.prefs
def _get_resource_path(self, name):
pass
@property
@utils.saveit
def history(self):
return history.History(self)
@property
@utils.saveit
def pycore(self):
return pycore.PyCore(self)
def close(self):
warnings.warn('Cannot close a NoProject',
DeprecationWarning, stacklevel=2)
ropefolder = None
class Project(_Project):
"""A Project containing files and folders"""
def __init__(self, projectroot, fscommands=None,
ropefolder='.ropeproject', **prefs):
"""A rope project
:parameters:
- `projectroot`: The address of the root folder of the project
- `fscommands`: Implements the file system operations used
by rope; have a look at `rope.base.fscommands`
- `ropefolder`: The name of the folder in which rope stores
project configurations and data. Pass `None` for not using
such a folder at all.
- `prefs`: Specify project preferences. These values
overwrite config file preferences.
"""
if projectroot != '/':
projectroot = _realpath(projectroot).rstrip('/\\')
self._address = projectroot
self._ropefolder_name = ropefolder
if not os.path.exists(self._address):
os.mkdir(self._address)
elif not os.path.isdir(self._address):
raise exceptions.RopeError('Project root exists and'
' is not a directory')
if fscommands is None:
fscommands = rope.base.fscommands.create_fscommands(self._address)
super(Project, self).__init__(fscommands)
self.ignored = _ResourceMatcher()
self.file_list = _FileListCacher(self)
self.prefs.add_callback('ignored_resources', self.ignored.set_patterns)
if ropefolder is not None:
self.prefs['ignored_resources'] = [ropefolder]
self._init_prefs(prefs)
def get_files(self):
return self.file_list.get_files()
def _get_resource_path(self, name):
return os.path.join(self._address, *name.split('/'))
def _init_ropefolder(self):
if self.ropefolder is not None:
if not self.ropefolder.exists():
self._create_recursively(self.ropefolder)
if not self.ropefolder.has_child('config.py'):
config = self.ropefolder.create_file('config.py')
config.write(self._default_config())
def _create_recursively(self, folder):
if folder.parent != self.root and not folder.parent.exists():
self._create_recursively(folder.parent)
folder.create()
def _init_prefs(self, prefs):
run_globals = {}
if self.ropefolder is not None:
config = self.get_file(self.ropefolder.path + '/config.py')
run_globals.update({'__name__': '__main__',
'__builtins__': __builtins__,
'__file__': config.real_path})
if config.exists():
config = self.ropefolder.get_child('config.py')
execfile(config.real_path, run_globals)
else:
exec(self._default_config(), run_globals)
if 'set_prefs' in run_globals:
run_globals['set_prefs'](self.prefs)
for key, value in prefs.items():
self.prefs[key] = value
self._init_other_parts()
self._init_ropefolder()
if 'project_opened' in run_globals:
run_globals['project_opened'](self)
def _default_config(self):
import rope.base.default_config
import inspect
return inspect.getsource(rope.base.default_config)
def _init_other_parts(self):
# Forcing the creation of `self.pycore` to register observers
self.pycore
def is_ignored(self, resource):
return self.ignored.does_match(resource)
def sync(self):
"""Closes project open resources"""
self.close()
def close(self):
"""Closes project open resources"""
self.data_files.write()
def set(self, key, value):
"""Set the `key` preference to `value`"""
self.prefs.set(key, value)
@property
def ropefolder(self):
if self._ropefolder_name is not None:
return self.get_folder(self._ropefolder_name)
def validate(self, folder=None):
if folder is None:
folder = self.root
super(Project, self).validate(folder)
root = property(lambda self: self.get_resource(''))
address = property(lambda self: self._address)
class NoProject(_Project):
"""A null object for holding out of project files.
    This class is a singleton; use the `get_no_project` global function.
"""
def __init__(self):
fscommands = rope.base.fscommands.FileSystemCommands()
super(NoProject, self).__init__(fscommands)
def _get_resource_path(self, name):
real_name = name.replace('/', os.path.sep)
return _realpath(real_name)
def get_resource(self, name):
universal_name = _realpath(name).replace(os.path.sep, '/')
return super(NoProject, self).get_resource(universal_name)
def get_files(self):
return []
_no_project = None
def get_no_project():
if NoProject._no_project is None:
NoProject._no_project = NoProject()
return NoProject._no_project
class _FileListCacher(object):
def __init__(self, project):
self.project = project
self.files = None
rawobserver = ResourceObserver(
self._changed, self._invalid, self._invalid,
self._invalid, self._invalid)
self.project.add_observer(rawobserver)
def get_files(self):
if self.files is None:
self.files = set()
self._add_files(self.project.root)
return self.files
def _add_files(self, folder):
for child in folder.get_children():
if child.is_folder():
self._add_files(child)
elif not self.project.is_ignored(child):
self.files.add(child)
def _changed(self, resource):
if resource.is_folder():
self.files = None
def _invalid(self, resource, new_resource=None):
self.files = None
class _DataFiles(object):
def __init__(self, project):
self.project = project
self.hooks = []
def read_data(self, name, compress=False, import_=False):
if self.project.ropefolder is None:
return None
compress = compress and self._can_compress()
opener = self._get_opener(compress)
file = self._get_file(name, compress)
if not compress and import_:
self._import_old_files(name)
if file.exists():
input = opener(file.real_path, 'rb')
try:
result = []
try:
while True:
result.append(pickle.load(input))
except EOFError:
pass
if len(result) == 1:
return result[0]
if len(result) > 1:
return result
finally:
input.close()
def write_data(self, name, data, compress=False):
if self.project.ropefolder is not None:
compress = compress and self._can_compress()
file = self._get_file(name, compress)
opener = self._get_opener(compress)
output = opener(file.real_path, 'wb')
try:
pickle.dump(data, output, 2)
finally:
output.close()
def add_write_hook(self, hook):
self.hooks.append(hook)
def write(self):
for hook in self.hooks:
hook()
def _can_compress(self):
try:
import gzip
return True
except ImportError:
return False
def _import_old_files(self, name):
old = self._get_file(name + '.pickle', False)
new = self._get_file(name, False)
if old.exists() and not new.exists():
shutil.move(old.real_path, new.real_path)
def _get_opener(self, compress):
if compress:
try:
import gzip
return gzip.open
except ImportError:
pass
return open
def _get_file(self, name, compress):
path = self.project.ropefolder.path + '/' + name
if compress:
path += '.gz'
return self.project.get_file(path)
def _realpath(path):
"""Return the real path of `path`
Is equivalent to ``realpath(abspath(expanduser(path)))``.
"""
# there is a bug in cygwin for os.path.abspath() for abs paths
if sys.platform == 'cygwin':
if path[1:3] == ':\\':
return path
return os.path.abspath(os.path.expanduser(path))
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
| gpl-3.0 |
giovannimanzoni/project2 | test/RS485/python/readFromRS485.py | 2 | 1384 | #!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
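# Hedged companion sketch (not part of the original sample): transmitting over
# the same RS-485 port.  The port name and baud rate mirror the reader above;
# the payload and the explicit xonxoff=True (the Xon/Xoff flow control named
# in the header comment) are illustrative assumptions.
def write_rs485(port='/dev/ttyS1', baudrate=9600, payload='hello RS-485\r\n'):
  tx = serial.Serial(port=port, baudrate=baudrate, xonxoff=True)
  try:
    tx.write(payload)  # pyserial on Python 2 accepts str payloads
  finally:
    tx.close()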
| cc0-1.0 |
FrankBian/kuma | kuma/humans/models.py | 5 | 2264 | from __future__ import with_statement
import json
import os
import subprocess
import urllib
from django.conf import settings
GITHUB_REPOS = "https://api.github.com/repos/mozilla/kuma/contributors"
class Human(object):
def __init__(self):
self.name = None
self.website = None
class HumansTXT(object):
def generate_file(self):
githubbers = self.get_github(json.load(urllib.urlopen(GITHUB_REPOS)))
localizers = self.get_mdn()
path = os.path.join(settings.HUMANSTXT_ROOT, "humans.txt")
with open(path, 'w') as target:
self.write_to_file(githubbers, target,
"Contributors on Github", "Developer")
self.write_to_file(localizers, target,
"Localization Contributors", "Localizer")
def write_to_file(self, humans, target, message, role):
target.write("%s \n" % message)
for h in humans:
target.write("%s: %s \n" %
(role, h.name.encode('ascii', 'ignore')))
if h.website is not None:
target.write("Website: %s \n" % h.website)
target.write('\n')
target.write('\n')
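    # Illustrative section produced by write_to_file() (hypothetical names,
    # not real contributor data): each human gets a "<role>: <name>" line, an
    # optional Website line, and a trailing blank line, e.g.
    #
    #   Contributors on Github
    #   Developer: Jane Doe
    #   Website: https://example.org
    #
    #   Developer: John Roe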
def get_github(self, data=None):
if not data:
raw_data = json.load(urllib.urlopen(GITHUB_REPOS))
else:
raw_data = data
humans = []
for contributor in raw_data:
human = Human()
human.name = contributor.get('name', contributor['login'])
human.website = contributor.get('blog', None)
humans.append(human)
return humans
def split_name(self, name):
if '@' in name:
name = name.split('@')[0]
return name
def get_mdn(self):
p = subprocess.Popen("svn log --quiet http://svn.mozilla.org/projects/\
mdn/trunk/locale/ | grep '^r' | awk '{print $3}' | sort | uniq",
shell=True, stdout=subprocess.PIPE)
localizers_list = p.communicate()[0].rstrip().split('\n', -1)
humans = []
for localizer in localizers_list:
human = Human()
human.name = self.split_name(localizer)
humans.append(human)
return humans
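    # Hedged illustration (hypothetical svn output): `svn log --quiet` emits
    # lines such as
    #
    #   r12345 | jdoe@svn.example.org | 2013-01-01 00:00:00 +0000 (...)
    #
    # so `grep '^r' | awk '{print $3}' | sort | uniq` keeps the unique
    # committer column, and split_name() then trims it to 'jdoe'.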
| mpl-2.0 |
ravello/ansible | v2/ansible/plugins/lookup/flattened.py | 60 | 2408 | # (c) 2013, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _check_list_of_one_list(self, term):
# make sure term is not a list of one (list of one..) item
# return the final non list item if so
if isinstance(term,list) and len(term) == 1:
term = term[0]
if isinstance(term,list):
term = self._check_list_of_one_list(term)
return term
def _do_flatten(self, terms, variables):
ret = []
for term in terms:
term = self._check_list_of_one_list(term)
if term == 'None' or term == 'null':
# ignore undefined items
break
if isinstance(term, basestring):
# convert a variable to a list
term2 = listify_lookup_plugin_terms(term, variables, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
term = self._do_flatten(term, variables)
ret.extend(term)
else:
ret.append(term)
return ret
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_flattened expects a list")
return self._do_flatten(terms, variables)
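# Hedged illustration (not part of the plugin): flattening a literal nested
# list.  Given
#
#   with_flattened:
#     - [1, [2, 3]]
#     - [4]
#
# the lookup iterates over 1, 2, 3, 4; string terms are additionally run
# through listify_lookup_plugin_terms() so variables expand to lists first.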
| gpl-3.0 |
openpaperwork/paperwork | paperwork-gtk/src/paperwork/frontend/beacon/__init__.py | 1 | 7272 | import datetime
import dateutil.parser
import http
import http.client
import json
import logging
import multiprocessing
import os
import platform
import ssl
import re
import threading
import urllib
logger = logging.getLogger(__name__)
class Beacon(object):
USER_AGENT = "Paperwork"
UPDATE_CHECK_INTERVAL = datetime.timedelta(days=7)
POST_STATISTICS_INTERVAL = datetime.timedelta(days=7)
SSL_CONTEXT = ssl._create_unverified_context()
GITHUB_RELEASES = {
'host': 'api.github.com',
'path': '/repos/openpaperwork/paperwork/releases',
}
OPENPAPERWORK_RELEASES = {
'host': os.getenv("OPENPAPER_SERVER", 'openpaper.work'),
'path': '/beacon/latest',
}
OPENPAPERWORK_STATS = {
'host': os.getenv("OPENPAPER_SERVER", 'openpaper.work'),
'path': '/beacon/post_statistics',
}
PROTOCOL = os.getenv("OPENPAPER_PROTOCOL", "https")
def __init__(self, config, flatpak):
super().__init__()
self.config = config
self.flatpak = flatpak
def get_version_github(self):
logger.info("Querying GitHub ...")
h = http.client.HTTPSConnection(
host=self.GITHUB_RELEASES['host'],
)
h.request('GET', url=self.GITHUB_RELEASES['path'], headers={
'User-Agent': self.USER_AGENT
})
r = h.getresponse()
r = r.read().decode('utf-8')
r = json.loads(r)
last_tag_date = None
last_tag_name = None
for release in r:
date = dateutil.parser.parse(release['created_at'])
tag = release['tag_name']
            if not re.match(r"\d+\.\d+(|\.\d+)", tag):
continue
if last_tag_date is None or last_tag_date < date:
last_tag_date = date
last_tag_name = tag
return last_tag_name
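    # Illustrative input (hypothetical API response excerpt): only tags that
    # look like "1.2" or "1.2.3" are considered, and the newest created_at
    # among them wins, so
    #
    #   [{'tag_name': '1.2.4', 'created_at': '2018-03-01T10:00:00Z'},
    #    {'tag_name': 'v1.3-beta', 'created_at': '2018-06-01T10:00:00Z'}]
    #
    # yields '1.2.4'; the beta tag is skipped because it fails the regex.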
def get_version_openpaperwork(self):
logger.info("Querying OpenPaper.work ...")
if self.PROTOCOL == "http":
h = http.client.HTTPConnection(
host=self.OPENPAPERWORK_RELEASES['host'],
)
else:
h = http.client.HTTPSConnection(
host=self.OPENPAPERWORK_RELEASES['host'],
context=self.SSL_CONTEXT
)
h.request('GET', url=self.OPENPAPERWORK_RELEASES['path'], headers={
'User-Agent': self.USER_AGENT
})
r = h.getresponse()
r = r.read().decode('utf-8')
r = json.loads(r)
return r['paperwork'][os.name]
def check_update(self):
if not self.config['check_for_update'].value:
logger.info("Update checking is disabled")
return
now = datetime.datetime.now()
last_check = self.config['last_update_check'].value
logger.info("Updates were last checked: {}".format(last_check))
if (last_check is not None and
last_check + self.UPDATE_CHECK_INTERVAL >= now):
logger.info("No need to check for new updates yet")
return
logger.info("Checking for updates ...")
version = None
try:
version = self.get_version_openpaperwork()
except Exception as exc:
logger.exception(
"Failed to get latest Paperwork release from OpenPaper.work. "
"Falling back to Github ...",
exc_info=exc
)
if version is None:
try:
version = self.get_version_github()
except Exception as exc:
logger.exception(
"Failed to get latest Paperwork from Github",
exc_info=exc
)
if version is None:
return
logger.info("Latest Paperwork release: {}".format(version))
self.config['last_update_found'].value = version
self.config['last_update_check'].value = now
self.config.write()
def get_statistics(self, version, docsearch):
distribution = platform.linux_distribution()
if distribution[0] == '':
distribution = platform.win32_ver()
processor = ""
os_name = os.name
if os_name != 'nt': # contains too much infos on Windows
processor = platform.processor()
if self.flatpak:
os_name += " (flatpak)"
return {
'uuid': int(self.config['uuid'].value),
'paperwork_version': str(version),
'nb_documents': int(docsearch.nb_docs),
'os_name': str(os_name),
'platform_architecture': str(platform.architecture()),
'platform_processor': str(processor),
'platform_distribution': str(distribution),
'cpu_count': int(multiprocessing.cpu_count()),
}
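    # Illustrative return value (made-up numbers, not real telemetry):
    #
    #   {'uuid': 123456789, 'paperwork_version': '1.2.4', 'nb_documents': 842,
    #    'os_name': 'posix (flatpak)',
    #    'platform_architecture': "('64bit', 'ELF')",
    #    'platform_processor': 'x86_64',
    #    'platform_distribution': "('Fedora', '29', 'Twenty Nine')",
    #    'cpu_count': 4}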
def send_statistics(self, version, docsearch):
if not self.config['send_statistics'].value:
logger.info("Anonymous statistics are disabled")
return
now = datetime.datetime.now()
last_post = self.config['last_statistics_post'].value
logger.info("Statistics were last posted: {}".format(last_post))
        if last_post is not None:
            logger.info("Next post date: {}".format(
                last_post + self.POST_STATISTICS_INTERVAL))
logger.info("Now: {}".format(now))
if (last_post is not None and
last_post + self.POST_STATISTICS_INTERVAL >= now):
logger.info("No need to post statistics")
return
logger.info("Sending anonymous statistics ...")
stats = self.get_statistics(version, docsearch)
logger.info("Statistics: {}".format(stats))
logger.info("Posting statistics on openpaper.work ...")
if self.PROTOCOL == "http":
h = http.client.HTTPConnection(
host=self.OPENPAPERWORK_STATS['host'],
)
else:
h = http.client.HTTPSConnection(
host=self.OPENPAPERWORK_STATS['host'],
context=self.SSL_CONTEXT
)
h.request('POST', url=self.OPENPAPERWORK_STATS['path'], headers={
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
'User-Agent': self.USER_AGENT,
}, body=urllib.parse.urlencode({
'statistics': json.dumps(stats),
}))
r = h.getresponse()
logger.info("Getting reply from openpaper.work ({})".format(r.status))
reply = r.read().decode('utf-8')
if r.status == http.client.OK:
logger.info("Openpaper.work replied: {} | {}".format(
r.status, r.reason
))
else:
logger.warning("Openpaper.work replied: {} | {}".format(
r.status, r.reason
))
logger.warning("Openpaper.work: {}".format(reply))
self.config['last_statistics_post'].value = now
self.config.write()
def check_update(beacon):
thread = threading.Thread(target=beacon.check_update)
thread.start()
def send_statistics(beacon, version, docsearch):
thread = threading.Thread(target=beacon.send_statistics, kwargs={
'version': version,
'docsearch': docsearch,
})
thread.start()
| gpl-3.0 |
atodorov/dnf-plugins-core | plugins/needs_restarting.py | 2 | 6405 | # needs_restarting.py
# DNF plugin to check for running binaries in a need of restarting.
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# the mechanism of scanning smaps for opened files and matching them back to
# packages is heavily inspired by the original needs-restarting.py:
# http://yum.baseurl.org/gitweb?p=yum-utils.git;a=blob;f=needs-restarting.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dnfpluginscore import logger, _
import dnf
import dnf.cli
import dnfpluginscore
import functools
import os
import re
import stat
def list_opened_files(uid):
for (pid, smaps) in list_smaps():
try:
if uid is not None and uid != owner_uid(smaps):
continue
with open(smaps, 'r') as smaps_file:
lines = smaps_file.readlines()
except EnvironmentError:
logger.warning("Failed to read PID %d's smaps.", pid)
continue
for line in lines:
ofile = smap2opened_file(pid, line)
if ofile is not None:
yield ofile
def list_smaps():
for dir_ in os.listdir('/proc'):
try:
pid = int(dir_)
except ValueError:
continue
smaps = '/proc/%d/smaps' % pid
yield (pid, smaps)
def memoize(func):
sentinel = object()
cache = {}
def wrapper(param):
val = cache.get(param, sentinel)
if val is not sentinel:
return val
val = func(param)
cache[param] = val
return val
return wrapper
def owner_uid(fname):
return os.stat(fname)[stat.ST_UID]
def owning_package(sack, fname):
matches = sack.query().filter(file=fname).run()
if matches:
return matches[0]
return None
def parse_args(args):
parser = dnfpluginscore.ArgumentParser(NeedsRestartingCommand.aliases[0])
parser.add_argument('-u', '--useronly', action='store_true',
help=_("only consider this user's processes"))
return parser.parse_args(args)
def print_cmd(pid):
cmdline = '/proc/%d/cmdline' % pid
with open(cmdline) as cmdline_file:
command = dnf.i18n.ucd(cmdline_file.read())
command = ' '.join(command.split('\000'))
print('%d : %s' % (pid, command))
def smap2opened_file(pid, line):
slash = line.find('/')
if slash < 0:
return None
if line.find('00:') >= 0:
# not a regular file
return None
fn = line[slash:].strip()
suffix_index = fn.rfind(' (deleted)')
if suffix_index < 0:
return OpenedFile(pid, fn, False)
else:
return OpenedFile(pid, fn[:suffix_index], True)
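# Hedged parsing example (hypothetical smaps content): a mapping backed by a
# real block device such as
#
#   '7f2c4a2e1000-7f2c4a4e0000 r--p 00000000 fd:01 1443338 '
#   '/usr/lib64/libc-2.28.so (deleted)'
#
# becomes OpenedFile(pid, '/usr/lib64/libc-2.28.so', True): the path after the
# first '/' is kept and the ' (deleted)' suffix is folded into the flag, while
# pseudo-file mappings on device 00:xx (e.g. '... 00:05 4821 /dev/zero') are
# rejected by the '00:' check above.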
class OpenedFile(object):
RE_TRANSACTION_FILE = re.compile('^(.+);[0-9A-Fa-f]{8,}$')
def __init__(self, pid, name, deleted):
self.deleted = deleted
self.name = name
self.pid = pid
@property
def presumed_name(self):
"""Calculate the name of the file pre-transaction.
        In case of a file that got deleted during the transaction, possibly
        just because of an upgrade to a newer version of the same file, RPM
        renames the old file to the same name with a hexadecimal suffix just
        before deleting it.
"""
if self.deleted:
match = self.RE_TRANSACTION_FILE.match(self.name)
if match:
return match.group(1)
return self.name
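# Hedged example of the pre-transaction name recovery (hypothetical path):
#
#   OpenedFile(4321, '/usr/lib64/libssl.so.1.1;5b3a94d7', True).presumed_name
#   == '/usr/lib64/libssl.so.1.1'
#
# The ';<hex>' suffix is what RPM appends when it renames a still-mapped file
# during an upgrade, so stripping it points back at the path the owning
# package actually installed.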
class ProcessStart(object):
def __init__(self):
self.boot_time = self.get_boot_time()
self.sc_clk_tck = self.get_sc_clk_tck()
@staticmethod
def get_boot_time():
with open('/proc/stat') as stat_file:
for line in stat_file.readlines():
if not line.startswith('btime '):
continue
return int(line[len('btime '):].strip())
@staticmethod
def get_sc_clk_tck():
return os.sysconf(os.sysconf_names['SC_CLK_TCK'])
def __call__(self, pid):
stat_fn = '/proc/%d/stat' % pid
with open(stat_fn) as stat_file:
stats = stat_file.read().strip().split()
ticks_after_boot = int(stats[21])
secs_after_boot = ticks_after_boot // self.sc_clk_tck
return self.boot_time + secs_after_boot
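# Hedged worked example (made-up numbers): with 'btime 1700000000' read from
# /proc/stat, SC_CLK_TCK == 100, and field 22 of /proc/<pid>/stat reporting
# 123456 clock ticks, ProcessStart()(pid) returns
#
#   1700000000 + 123456 // 100 == 1700001234
#
# seconds since the epoch, which is later compared against the owning
# package's installtime to decide whether the process is stale.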
class NeedsRestarting(dnf.Plugin):
name = 'needs-restarting'
def __init__(self, base, cli):
super(NeedsRestarting, self).__init__(base, cli)
if cli is None:
return
cli.register_command(NeedsRestartingCommand)
class NeedsRestartingCommand(dnf.cli.Command):
aliases = ('needs-restarting',)
summary = _('determine updated binaries that need restarting')
usage = ''
def configure(self, _):
demands = self.cli.demands
demands.sack_activation = True
def run(self, args):
opts = parse_args(args)
process_start = ProcessStart()
owning_pkg_fn = functools.partial(owning_package, self.base.sack)
owning_pkg_fn = memoize(owning_pkg_fn)
stale_pids = set()
uid = os.geteuid() if opts.useronly else None
for ofile in list_opened_files(uid):
pkg = owning_pkg_fn(ofile.presumed_name)
if pkg is None:
continue
if pkg.installtime > process_start(ofile.pid):
stale_pids.add(ofile.pid)
for pid in sorted(stale_pids):
print_cmd(pid)
| gpl-2.0 |