code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M
from sst.actions import (
assert_title,
fails,
wait_for,
)
from u1testutils import mail
from u1testutils.sst import config
from acceptance import helpers
config.set_base_url_from_env()
PASSWORD = 'Admin007'
# 2) Create 2 accounts (A & B). In Account A add email address C and do not
# verify. In Account B add email address C and do not verify.
email_a_id = mail.make_unique_test_email_address()
email_b_id = mail.make_unique_test_email_address()
email_c_id = mail.make_unique_test_email_address()
helpers.register_account(email_a_id, password=PASSWORD)
vcode_x = helpers.add_email(email_c_id)
helpers.logout()
helpers.register_account(email_b_id, password=PASSWORD)
vcode_y = helpers.add_email(email_c_id)
# try x from a, should fail
helpers.logout()
helpers.login(email_a_id, PASSWORD)
# Trying and failing to use token X completely invalidates token X, even for
# account B (which now owns the token) later in this test.
# helpers.try_to_validate_email(email_c_id, vcode_x)
# fails(assert_title, 'Complete email address validation')
# try y from a, should fail
helpers.try_to_validate_email(email_c_id, vcode_y, finish_validation=False)
fails(assert_title, 'Complete email address validation')
# both x & y should work for b, but using one should kill the other.
# try x from b, should work
helpers.logout()
helpers.login(email_b_id, PASSWORD)
helpers.try_to_validate_email(email_c_id, vcode_x, finish_validation=False)
wait_for(assert_title, 'Complete email address validation')
# now, y from b should fail, because address C was already verified (but would
# normally work)
helpers.try_to_validate_email(email_c_id, vcode_y, finish_validation=False)
fails(assert_title, 'Complete email address validation')
| miing/mci_migo | acceptance/tests/emails/doubled_email.py | Python | agpl-3.0 | 1,736 |
"""
Views related to course groups functionality.
"""
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_http_methods
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseBadRequest
from util.json_request import expect_json, JsonResponse
from django.db import transaction
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext
import logging
import re
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from edxmako.shortcuts import render_to_response
from . import cohorts
from lms.djangoapps.django_comment_client.utils import get_discussion_category_map, get_discussion_categories_ids
from lms.djangoapps.django_comment_client.constants import TYPE_ENTRY
from .models import CourseUserGroup, CourseUserGroupPartitionGroup, CohortMembership
log = logging.getLogger(__name__)
def json_http_response(data):
"""
Return an HttpResponse with the data json-serialized and the right content
type header.
"""
return JsonResponse(data)
def split_by_comma_and_whitespace(cstr):
"""
Split a string both by commas and whitespace. Returns a list.
"""
return re.split(r'[\s,]+', cstr)
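# Illustrative behaviour of the splitter above (note that leading or trailing
# separators yield empty strings, which the callers below skip explicitly):
#   split_by_comma_and_whitespace('a, b\tc')  ->  ['a', 'b', 'c']
#   split_by_comma_and_whitespace(',a ')      ->  ['', 'a', '']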
def link_cohort_to_partition_group(cohort, partition_id, group_id):
"""
Create cohort to partition_id/group_id link.
"""
CourseUserGroupPartitionGroup(
course_user_group=cohort,
partition_id=partition_id,
group_id=group_id,
).save()
def unlink_cohort_partition_group(cohort):
"""
Remove any existing cohort to partition_id/group_id link.
"""
CourseUserGroupPartitionGroup.objects.filter(course_user_group=cohort).delete()
# pylint: disable=invalid-name
def _get_course_cohort_settings_representation(course, course_cohort_settings):
"""
    Returns a JSON representation of a course's cohort settings.
"""
cohorted_course_wide_discussions, cohorted_inline_discussions = get_cohorted_discussions(
course, course_cohort_settings
)
return {
'id': course_cohort_settings.id,
'is_cohorted': course_cohort_settings.is_cohorted,
'cohorted_inline_discussions': cohorted_inline_discussions,
'cohorted_course_wide_discussions': cohorted_course_wide_discussions,
'always_cohort_inline_discussions': course_cohort_settings.always_cohort_inline_discussions,
}
def _get_cohort_representation(cohort, course):
"""
Returns a JSON representation of a cohort.
"""
group_id, partition_id = cohorts.get_group_info_for_cohort(cohort)
assignment_type = cohorts.get_assignment_type(cohort)
return {
'name': cohort.name,
'id': cohort.id,
'user_count': cohort.users.filter(courseenrollment__course_id=course.location.course_key,
courseenrollment__is_active=1).count(),
'assignment_type': assignment_type,
'user_partition_id': partition_id,
'group_id': group_id,
}
def get_cohorted_discussions(course, course_settings):
"""
Returns the course-wide and inline cohorted discussion ids separately.
"""
cohorted_course_wide_discussions = []
cohorted_inline_discussions = []
course_wide_discussions = [topic['id'] for __, topic in course.discussion_topics.items()]
all_discussions = get_discussion_categories_ids(course, None, include_all=True)
for cohorted_discussion_id in course_settings.cohorted_discussions:
if cohorted_discussion_id in course_wide_discussions:
cohorted_course_wide_discussions.append(cohorted_discussion_id)
elif cohorted_discussion_id in all_discussions:
cohorted_inline_discussions.append(cohorted_discussion_id)
return cohorted_course_wide_discussions, cohorted_inline_discussions
@require_http_methods(("GET", "PATCH"))
@ensure_csrf_cookie
@expect_json
@login_required
def course_cohort_settings_handler(request, course_key_string):
"""
The restful handler for cohort setting requests. Requires JSON.
    This raises a 404 if the user is not staff.
GET
Returns the JSON representation of cohort settings for the course.
PATCH
Updates the cohort settings for the course. Returns the JSON representation of updated settings.
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
cohort_settings = cohorts.get_course_cohort_settings(course_key)
if request.method == 'PATCH':
cohorted_course_wide_discussions, cohorted_inline_discussions = get_cohorted_discussions(
course, cohort_settings
)
settings_to_change = {}
if 'is_cohorted' in request.json:
settings_to_change['is_cohorted'] = request.json.get('is_cohorted')
if 'cohorted_course_wide_discussions' in request.json or 'cohorted_inline_discussions' in request.json:
cohorted_course_wide_discussions = request.json.get(
'cohorted_course_wide_discussions', cohorted_course_wide_discussions
)
cohorted_inline_discussions = request.json.get(
'cohorted_inline_discussions', cohorted_inline_discussions
)
settings_to_change['cohorted_discussions'] = cohorted_course_wide_discussions + cohorted_inline_discussions
if 'always_cohort_inline_discussions' in request.json:
settings_to_change['always_cohort_inline_discussions'] = request.json.get(
'always_cohort_inline_discussions'
)
if not settings_to_change:
return JsonResponse({"error": unicode("Bad Request")}, 400)
try:
cohort_settings = cohorts.set_course_cohort_settings(
course_key, **settings_to_change
)
except ValueError as err:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": unicode(err)}, 400)
return JsonResponse(_get_course_cohort_settings_representation(course, cohort_settings))
@require_http_methods(("GET", "PUT", "POST", "PATCH"))
@ensure_csrf_cookie
@expect_json
@login_required
def cohort_handler(request, course_key_string, cohort_id=None):
"""
The restful handler for cohort requests. Requires JSON.
GET
If a cohort ID is specified, returns a JSON representation of the cohort
(name, id, user_count, assignment_type, user_partition_id, group_id).
If no cohort ID is specified, returns the JSON representation of all cohorts.
This is returned as a dict with the list of cohort information stored under the
key `cohorts`.
PUT or POST or PATCH
If a cohort ID is specified, updates the cohort with the specified ID. Currently the only
properties that can be updated are `name`, `user_partition_id` and `group_id`.
Returns the JSON representation of the updated cohort.
        If no cohort ID is specified, creates a new cohort and returns the JSON representation of
        the created cohort.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
if request.method == 'GET':
if not cohort_id:
all_cohorts = [
_get_cohort_representation(c, course)
for c in cohorts.get_course_cohorts(course)
]
return JsonResponse({'cohorts': all_cohorts})
else:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
return JsonResponse(_get_cohort_representation(cohort, course))
else:
name = request.json.get('name')
assignment_type = request.json.get('assignment_type')
if not name:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": "Cohort name must be specified."}, 400)
if not assignment_type:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse({"error": "Assignment type must be specified."}, 400)
# If cohort_id is specified, update the existing cohort. Otherwise, create a new cohort.
if cohort_id:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
if name != cohort.name:
if cohorts.is_cohort_exists(course_key, name):
err_msg = ugettext("A cohort with the same name already exists.")
return JsonResponse({"error": unicode(err_msg)}, 400)
cohort.name = name
cohort.save()
try:
cohorts.set_assignment_type(cohort, assignment_type)
except ValueError as err:
return JsonResponse({"error": unicode(err)}, 400)
else:
try:
cohort = cohorts.add_cohort(course_key, name, assignment_type)
except ValueError as err:
return JsonResponse({"error": unicode(err)}, 400)
group_id = request.json.get('group_id')
if group_id is not None:
user_partition_id = request.json.get('user_partition_id')
if user_partition_id is None:
# Note: error message not translated because it is not exposed to the user (UI prevents this state).
return JsonResponse(
{"error": "If group_id is specified, user_partition_id must also be specified."}, 400
)
existing_group_id, existing_partition_id = cohorts.get_group_info_for_cohort(cohort)
if group_id != existing_group_id or user_partition_id != existing_partition_id:
unlink_cohort_partition_group(cohort)
link_cohort_to_partition_group(cohort, user_partition_id, group_id)
else:
# If group_id was specified as None, unlink the cohort if it previously was associated with a group.
existing_group_id, _ = cohorts.get_group_info_for_cohort(cohort)
if existing_group_id is not None:
unlink_cohort_partition_group(cohort)
return JsonResponse(_get_cohort_representation(cohort, course))
@ensure_csrf_cookie
def users_in_cohort(request, course_key_string, cohort_id):
"""
    Return users in the cohort. Show up to 100 per page, and page
    using the 'page' GET attribute in the call.
    Returns:
        Json dump of a dictionary in the following format:
{'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': [{'username': ..., 'email': ..., 'name': ...}]
}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
# this will error if called with a non-int cohort_id. That's ok--it
# shouldn't happen for valid clients.
cohort = cohorts.get_cohort_by_id(course_key, int(cohort_id))
paginator = Paginator(cohort.users.all(), 100)
try:
page = int(request.GET.get('page'))
except (TypeError, ValueError):
# These strings aren't user-facing so don't translate them
return HttpResponseBadRequest('Requested page must be numeric')
else:
        if page <= 0:
            return HttpResponseBadRequest('Requested page must be greater than zero')
try:
users = paginator.page(page)
except EmptyPage:
users = [] # When page > number of pages, return a blank page
user_info = [{'username': u.username,
'email': u.email,
'name': '{0} {1}'.format(u.first_name, u.last_name)}
for u in users]
return json_http_response({'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': user_info})
@transaction.non_atomic_requests
@ensure_csrf_cookie
@require_POST
def add_users_to_cohort(request, course_key_string, cohort_id):
"""
Return json dict of:
{'success': True,
'added': [{'username': ...,
'name': ...,
'email': ...}, ...],
'changed': [{'username': ...,
'name': ...,
'email': ...,
'previous_cohort': ...}, ...],
'present': [str1, str2, ...], # already there
'unknown': [str1, str2, ...]}
Raises Http404 if the cohort cannot be found for the given course.
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
try:
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
except CourseUserGroup.DoesNotExist:
raise Http404("Cohort (ID {cohort_id}) not found for {course_key_string}".format(
cohort_id=cohort_id,
course_key_string=course_key_string
))
users = request.POST.get('users', '')
added = []
changed = []
present = []
unknown = []
for username_or_email in split_by_comma_and_whitespace(users):
if not username_or_email:
continue
try:
(user, previous_cohort) = cohorts.add_user_to_cohort(cohort, username_or_email)
info = {
'username': user.username,
'email': user.email,
}
if previous_cohort:
info['previous_cohort'] = previous_cohort
changed.append(info)
else:
added.append(info)
except ValueError:
present.append(username_or_email)
except User.DoesNotExist:
unknown.append(username_or_email)
return json_http_response({'success': True,
'added': added,
'changed': changed,
'present': present,
'unknown': unknown})
@ensure_csrf_cookie
@require_POST
def remove_user_from_cohort(request, course_key_string, cohort_id):
"""
Expects 'username': username in POST data.
Return json dict of:
{'success': True} or
{'success': False,
'msg': error_msg}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
username = request.POST.get('username')
if username is None:
return json_http_response({'success': False,
'msg': 'No username specified'})
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
log.debug('no user')
return json_http_response({'success': False,
'msg': "No user '{0}'".format(username)})
try:
membership = CohortMembership.objects.get(user=user, course_id=course_key)
membership.delete()
except CohortMembership.DoesNotExist:
pass
return json_http_response({'success': True})
def debug_cohort_mgmt(request, course_key_string):
"""
Debugging view for dev.
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string)
# add staff check to make sure it's safe if it's accidentally deployed.
get_course_with_access(request.user, 'staff', course_key)
context = {'cohorts_url': reverse(
'cohorts',
kwargs={'course_key': course_key.to_deprecated_string()}
)}
return render_to_response('/course_groups/debug.html', context)
@expect_json
@login_required
def cohort_discussion_topics(request, course_key_string):
"""
The handler for cohort discussion categories requests.
    This raises a 404 if the user is not staff.
    Returns the JSON representation of the course's discussion topics, grouped by category.
Example:
        >>> example = {
        >>>     "course_wide_discussions": {
        >>>         "entries": {
        >>>             "General": {
        >>>                 "sort_key": "General",
        >>>                 "is_cohorted": True,
        >>>                 "id": "i4x-edx-eiorguegnru-course-foobarbaz"
        >>>             }
        >>>         },
        >>>         "children": ["General", "entry"]
        >>>     },
        >>>     "inline_discussions": {
        >>>         "subcategories": {
        >>>             "Getting Started": {
        >>>                 "subcategories": {},
        >>>                 "children": [
        >>>                     ["Working with Videos", "entry"],
        >>>                     ["Videos on edX", "entry"]
        >>>                 ],
        >>>                 "entries": {
        >>>                     "Working with Videos": {
        >>>                         "sort_key": None,
        >>>                         "is_cohorted": False,
        >>>                         "id": "d9f970a42067413cbb633f81cfb12604"
        >>>                     },
        >>>                     "Videos on edX": {
        >>>                         "sort_key": None,
        >>>                         "is_cohorted": False,
        >>>                         "id": "98d8feb5971041a085512ae22b398613"
        >>>                     }
        >>>                 }
        >>>             }
        >>>         },
        >>>         "children": ["Getting Started", "subcategory"]
        >>>     }
        >>> }
"""
course_key = CourseKey.from_string(course_key_string)
course = get_course_with_access(request.user, 'staff', course_key)
discussion_topics = {}
discussion_category_map = get_discussion_category_map(
course, request.user, cohorted_if_in_list=True, exclude_unstarted=False
)
# We extract the data for the course wide discussions from the category map.
course_wide_entries = discussion_category_map.pop('entries')
course_wide_children = []
inline_children = []
for name, c_type in discussion_category_map['children']:
if name in course_wide_entries and c_type == TYPE_ENTRY:
course_wide_children.append([name, c_type])
else:
inline_children.append([name, c_type])
discussion_topics['course_wide_discussions'] = {
'entries': course_wide_entries,
'children': course_wide_children
}
discussion_category_map['children'] = inline_children
discussion_topics['inline_discussions'] = discussion_category_map
return JsonResponse(discussion_topics)
| romain-li/edx-platform | openedx/core/djangoapps/course_groups/views.py | Python | agpl-3.0 | 19,604 |
# coding: utf-8
from openerp.osv import osv, fields
class rainsoft_stock_return(osv.Model):
    _inherit = 'stock.return.picking'
    _defaults = {
        'invoice_state': '2binvoiced',
    }
| kevin8909/xjerp | openerp/addons/Rainsoft_Xiangjie/rainsoft_stock_return.py | Python | agpl-3.0 | 185 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from copy import deepcopy
from superdesk import get_resource_service
from superdesk.resource import Resource
from superdesk.services import BaseService
from apps.archive.common import ITEM_UPDATE, get_user, ITEM_CREATE
from superdesk.metadata.item import CONTENT_STATE, ITEM_STATE
from superdesk.utc import utcnow
log = logging.getLogger(__name__)
fields_to_remove = ['_id', '_etag', 'versioncreator', 'originalcreator', 'versioncreated',
'_current_version', 'version', '_updated', 'lock_session', 'lock_user', 'lock_time', 'lock_action',
'force_unlock', '_created', 'guid', 'family_id', 'firstcreated', 'original_creator']
class ArchiveHistoryResource(Resource):
endpoint_name = 'archive_history'
resource_methods = ['GET']
item_methods = ['GET']
schema = {
'item_id': {'type': 'string'},
'user_id': Resource.rel('users', True),
'operation': {'type': 'string'},
'update': {'type': 'dict', 'nullable': True},
'version': {'type': 'integer'},
'original_item_id': {'type': 'string'}
}
mongo_indexes = {'item_id': ([('item_id', 1)], {'background': True})}
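# Illustrative read against this resource (Eve-style filtering; the endpoint
# name is registered above, and the item id is made up):
#   GET /archive_history?where={"item_id": "some-item-id"}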
class ArchiveHistoryService(BaseService):
def on_item_updated(self, updates, original, operation=None):
item = deepcopy(original)
if updates:
item.update(updates)
self._save_history(item, updates, operation or ITEM_UPDATE)
def on_item_deleted(self, doc):
lookup = {'item_id': doc[config.ID_FIELD]}
self.delete(lookup=lookup)
def get_user_id(self, item):
user = get_user()
if user:
return user.get(config.ID_FIELD)
def _save_history(self, item, update, operation):
        # In case of auto-routing, if the original_creator exists in our database,
        # create an item-create record in the archive history.
if item.get(ITEM_STATE) == CONTENT_STATE.ROUTED and item.get('original_creator') \
and not item.get('original_id'):
user = get_resource_service('users').find_one(req=None, _id=item.get('original_creator'))
firstcreated = item.get('firstcreated', utcnow())
if user:
history = {
'item_id': item[config.ID_FIELD],
'user_id': user.get(config.ID_FIELD),
'operation': ITEM_CREATE,
'update': self._remove_unwanted_fields(update, item),
'version': item.get(config.VERSION, 1),
'_created': firstcreated,
'_updated': firstcreated
}
self.post([history])
history = {
'item_id': item[config.ID_FIELD],
'user_id': self.get_user_id(item),
'operation': operation,
'update': self._remove_unwanted_fields(update, item),
'version': item.get(config.VERSION, 1)
}
self.post([history])
def _remove_unwanted_fields(self, update, original):
if update:
update_copy = deepcopy(update)
for field in fields_to_remove:
update_copy.pop(field, None)
if original.get('sms_message') == update_copy.get('sms_message'):
update_copy.pop('sms_message', None)
return update_copy
| hlmnrmr/superdesk-core | apps/archive_history/service.py | Python | agpl-3.0 | 3,708 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from copy import copy
from eve.utils import config, ParsedRequest
from superdesk.utc import utcnow
from superdesk.services import BaseService
from superdesk.publish.formatters.ninjs_formatter import NINJSFormatter
from superdesk import get_resource_service
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
logger = logging.getLogger('superdesk')
class PublishService(BaseService):
"""A service for publishing to the content api.
Serves mainly as a proxy to the data layer.
"""
formatter = NINJSFormatter()
subscriber = {'config': {}}
    def publish(self, item, subscribers=None):
        """Publish an item to content api.
        This must be enabled via ``PUBLISH_TO_CONTENT_API`` setting.
        :param item: item to publish
        :param subscribers: list of subscribers to publish to
        """
        subscribers = subscribers or []
        if not self._filter_item(item):
doc = self.formatter._transform_to_ninjs(item, self.subscriber)
now = utcnow()
doc.setdefault('firstcreated', now)
doc.setdefault('versioncreated', now)
doc.setdefault(config.VERSION, item.get(config.VERSION, 1))
doc['subscribers'] = [str(sub['_id']) for sub in subscribers]
if 'evolvedfrom' in doc:
parent_item = self.find_one(req=None, _id=doc['evolvedfrom'])
if parent_item:
doc['ancestors'] = copy(parent_item.get('ancestors', []))
doc['ancestors'].append(doc['evolvedfrom'])
doc['bookmarks'] = parent_item.get('bookmarks', [])
else:
logger.warning("Failed to find evolvedfrom item '{}' for '{}'".format(
doc['evolvedfrom'], doc['guid'])
)
self._assign_associations(item, doc)
logger.info('publishing %s to %s' % (doc['guid'], subscribers))
_id = self._create_doc(doc)
if 'evolvedfrom' in doc and parent_item:
self.system_update(parent_item['_id'], {'nextversion': _id}, parent_item)
return _id
else:
return None
def create(self, docs, **kwargs):
ids = []
for doc in docs:
ids.append(self._create_doc(doc, **kwargs))
return ids
def _create_doc(self, doc, **kwargs):
"""Create a new item or update existing."""
item = copy(doc)
item.setdefault('_id', item.get('guid'))
_id = item[config.ID_FIELD] = item.pop('guid')
# merging the existing and new subscribers
original = self.find_one(req=None, _id=_id)
if original:
item['subscribers'] = list(set(original.get('subscribers', [])) | set(item.get('subscribers', [])))
self._process_associations(item, original)
self._create_version_doc(item)
if original:
self.update(_id, item, original)
return _id
else:
return super().create([item], **kwargs)[0]
def _create_version_doc(self, item):
"""
Store the item in the item version collection
:param item:
:return:
"""
version_item = copy(item)
version_item['_id_document'] = version_item.pop('_id')
get_resource_service('items_versions').create([version_item])
# if the update is a cancel we need to cancel all versions
if item.get('pubstatus', '') == 'canceled':
self._cancel_versions(item.get('_id'))
def _cancel_versions(self, doc_id):
"""
Given an id of a document set the pubstatus to canceled for all versions
:param doc_id:
:return:
"""
query = {'_id_document': doc_id}
update = {'pubstatus': 'canceled'}
for item in get_resource_service('items_versions').get_from_mongo(req=None, lookup=query):
if item.get('pubstatus') != 'canceled':
get_resource_service('items_versions').update(item['_id'], update, item)
def _filter_item(self, item):
"""
Filter the item out if it matches any API Block filter conditions
:param item:
        :return: True if the item is blocked, False if it is OK to publish it on the API.
"""
# Get the API blocking Filters
req = ParsedRequest()
filter_conditions = list(get_resource_service('content_filters').get(req=req, lookup={'api_block': True}))
# No API blocking filters
if not filter_conditions:
return False
filter_service = get_resource_service('content_filters')
for fc in filter_conditions:
if filter_service.does_match(fc, item):
logger.info('API Filter block {} matched for item {}.'.format(fc, item.get(config.ID_FIELD)))
return True
return False
def _assign_associations(self, item, doc):
"""Assign Associations to published item
:param dict item: item being published
        :param dict doc: ninjs document
"""
if item[ITEM_TYPE] != CONTENT_TYPE.TEXT:
return
for assoc, assoc_item in (item.get('associations') or {}).items():
if not assoc_item:
continue
doc.get('associations', {}).get(assoc)['subscribers'] = list(map(str, assoc_item.get('subscribers') or []))
def _process_associations(self, updates, original):
"""Update associations using existing published item and ensure that associated item subscribers
are equal or subset of the parent subscribers.
:param updates:
:param original:
:return:
"""
if updates[ITEM_TYPE] != CONTENT_TYPE.TEXT:
return
subscribers = updates.get('subscribers') or []
for assoc, update_assoc in (updates.get('associations') or {}).items():
if not update_assoc:
continue
if original:
original_assoc = (original.get('associations') or {}).get(assoc)
if original_assoc and original_assoc.get(config.ID_FIELD) == update_assoc.get(config.ID_FIELD):
update_assoc['subscribers'] = list(set(original_assoc.get('subscribers') or []) |
set(update_assoc.get('subscribers') or []))
update_assoc['subscribers'] = list(set(update_assoc['subscribers']) & set(subscribers))
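        # Worked example of the merge above (illustrative values): if the parent
        # item's subscribers are ['1', '2'], the stored association has ['1', '3']
        # and the update brings ['2'], the union is {'1', '2', '3'} and the final
        # intersection with the parent's subscribers leaves ['1', '2'].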
| marwoodandrew/superdesk-core | content_api/publish/service.py | Python | agpl-3.0 | 6,723 |
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Nicolas Badoux <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
"name": "Survey Phone",
"summary": "Make the filling of survey by internal users easier.",
"version": "12.0.1.0.0",
"category": "Other",
"sequence": 150,
"author": "Compassion CH",
"license": "AGPL-3",
"website": "http://www.compassion.ch",
"depends": [
"survey", # oca_addons/survey
"base_phone", # oca_addons/connector-telephony
"survey",
"partner_contact_birthdate", # oca_addons/partner_contact
"advanced_translation",
],
"data": [
"views/survey_user_input_view.xml",
"views/survey_phone.xml",
"report/survey_report.xml",
],
"demo": [],
"installable": True,
"auto_install": False,
}
| CompassionCH/compassion-modules | survey_phone/__manifest__.py | Python | agpl-3.0 | 2,093 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'kmol'
SITENAME = 'CDW11 網頁 (虎尾科大MDE)'
SITEURL = 'http://cdw11-40323200.rhcloud.com/static/'
# Do not use the directory an article lives in as its category
USE_FOLDER_AS_CATEGORY = False
#PATH = 'content'
#OUTPUT_PATH = 'output'
TIMEZONE = 'Asia/Taipei'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('pelican-bootstrap3', 'https://github.com/DandyDev/pelican-bootstrap3/'),
('pelican-plugins', 'https://github.com/getpelican/pelican-plugins'),
('Tipue search', 'https://github.com/Tipue/Tipue-Search'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Must be an absolute path, or a path relative to this settings file's directory
PLUGIN_PATHS = ['plugin']
PLUGINS = ['liquid_tags.notebook', 'summary', 'tipue_search', 'sitemap', 'render_math']
# for sitemap plugin
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.5,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
}
}
# search is for Tipue search
DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'authors', 'archives', 'search')
# for pelican-bootstrap3 theme settings
#TAG_CLOUD_MAX_ITEMS = 50
DISPLAY_CATEGORIES_ON_SIDEBAR = True
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
TAGS_URL = "tags.html"
CATEGORIES_URL = "categories.html"
#SHOW_ARTICLE_AUTHOR = True
#MENUITEMS = [('Home', '/'), ('Archives', '/archives.html'), ('Search', '/search.html')]
# Keep the latest versions of some commonly used JavaScript libraries here; they can be called via http://cadlab.mde.tw/post/js/
STATIC_PATHS = ['js'] | tsrnnash/bg8-cdw11 | static/pelicanconf.py | Python | agpl-3.0 | 2,146 |
import os
from flask import current_app
def get_absolute_file_path(relative_path: str) -> str:
return os.path.join(current_app.config['UPLOAD_FOLDER'], relative_path)
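# Illustrative call (assuming the app is configured with UPLOAD_FOLDER='/uploads'):
#   get_absolute_file_path('workouts/1/map.png')  ->  '/uploads/workouts/1/map.png'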
| SamR1/FitTrackee | fittrackee/workouts/utils_files.py | Python | agpl-3.0 | 174 |
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614
import sys
import json
import lms.envs.common
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES
)
from path import path
from lms.lib.xblock.mixin import LmsBlockMixin
from cms.lib.xblock.mixin import CmsBlockMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin, prefer_xmodules
from dealer.git import git
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
'ENABLE_DISCUSSION_SERVICE': False,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
'STUDIO_NPS_SURVEY': True,
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles embargo functionality
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
}
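# A minimal sketch of the pattern described in the module docstring above:
# compute feature-dependent values from FEATURES instead of hard-coding them.
# Kept as a comment because this settings module is executed on import; the
# helper name and the app it toggles are illustrative, not part of this file.
#
#     def apps_from_features(features, base_apps):
#         apps = list(base_apps)
#         if features.get('ENABLE_SERVICE_STATUS'):
#             apps.append('service_status')
#         return tuple(apps)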
ENABLE_JASMINE = False
########### course fields #############
# COURSE_EXTEND_FIELDS = lms.envs.common.COURSE_EXTEND_FIELDS
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, CmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ SIGNAL HANDLERS ################################
# This is imported to register the exception signal handling that logs exceptions
import monitoring.exceptions # noqa
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
# Site info
SITE_ID = 1
SITE_NAME = "0.0.0.0:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
IGNORABLE_404_ENDS = ('favicon.ico',)
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.126.com'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = 'xiaodunxin'
EMAIL_HOST_PASSWORD = '123456qr'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'Asia/Shanghai' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'zh-cn' # http://www.i18nguy.com/unicode/language-identifiers.html
SITE_NAME = 'mooc.diandiyun.com:18010'
LANGUAGES = lms.envs.common.LANGUAGES
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-calendar-vendor': {
'source_filenames': [
'css/vendor/fullcalendar/fullcalendar.css',
'css/vendor/fullcalendar/fullcalendar_s.css',
'css/vendor/fullcalendar/fullcalendar.print.css',
],
'output_filename': 'css/lms-style-fullcalendar-vendor.css',
}
}
fullcalendar_vendor_js = [
'js/vendor/fullcalendar/moment.min.js',
'js/vendor/fullcalendar/fullcalendar.min.js',
'js/vendor/fullcalendar/jquery-ui.custom.min.js',
'js/vendor/fullcalendar/lang-all.js',
]
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
'calendar_vendor': {
'source_filenames': fullcalendar_vendor_js,
'output_filename': 'js/lms-fullcalendar_vendor.js',
'test_order': 0,
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
# URL to test YouTube availability
YOUTUBE_TEST_URL = 'https://gdata.youtube.com/feeds/api/videos/'
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# XBlocks containing migrations
'mentoring',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'user_api',
'django_openid_auth',
'embargo',
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 10000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
TRACKING_ENABLED = True
# Current youtube api for requesting transcripts.
# for example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
YOUTUBE_API = {
'url': "http://video.google.com/timedtext",
'params': {'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here'}
}
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### JSdraw (only installed in some instances)
try:
import edx_jsdraw
except ImportError:
pass
else:
INSTALLED_APPS += ('edx_jsdraw',)
############## SSO KEY ################
SSO_KEY = "SSOFOUNDER"
############## user auth ##############
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher'
)
############## BUSINESS SYSTEM #################
XIAODUN_BACK_HOST = 'http://busi.xiaodun.cn/app'
############## video mettings ##################
VEDIO_MEETING_DOMAIN = "http://passport.guoshi.com/mp"
############## wenjuan domain ##################
WENJUAN_DOMAIN = "http://apitest.wenjuan.com:8000"
####### wenjuan secret_key #######
WENJUAN_SECKEY = "9d15a674a6e621058f1ea9171413b7c0"
| XiaodunServerGroup/ddyedx | cms/envs/common.py | Python | agpl-3.0 | 18,114 |
"""TOSEC models"""
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=256)
description = models.CharField(max_length=256)
category = models.CharField(max_length=256)
version = models.CharField(max_length=32)
author = models.CharField(max_length=128)
section = models.CharField(max_length=12, default='TOSEC')
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Categories'
ordering = ('name', )
class Game(models.Model):
category = models.ForeignKey(Category, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
description = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ('category', 'name')
class Rom(models.Model):
game = models.ForeignKey(Game, related_name='roms', on_delete=models.CASCADE)
name = models.CharField(max_length=255)
size = models.IntegerField()
crc = models.CharField(max_length=16)
md5 = models.CharField(max_length=32)
sha1 = models.CharField(max_length=64)
def __str__(self):
return self.name
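# Illustrative use of these models (all values are made up; assumes migrations
# have been applied):
#   category = Category.objects.create(
#       name='Commodore Amiga - Games', description='Amiga games set',
#       category='Games', version='2014', author='TOSEC')
#   game = Game.objects.create(category=category, name='Some Game',
#                              description='Some Game (1990)(Publisher)')
#   game.roms.create(name='some game.adf', size=901120, crc='deadbeef',
#                    md5='0' * 32, sha1='0' * 40)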
| lutris/website | tosec/models.py | Python | agpl-3.0 | 1,177 |
"""autogenerated by genpy from qbo_arduqbo/NoiseLevels.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class NoiseLevels(genpy.Message):
_md5sum = "e44910923ee5ef3281d32758158e1379"
_type = "qbo_arduqbo/NoiseLevels"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# Software License Agreement (LGPL v2.1 License)
#
# Copyright (c) 2012 Thecorpora, Inc.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License,
# or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.# Software License Agreement (LGPL v2.1 License)
#
# Copyright (c) 2012 Thecorpora, Inc.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License,
# or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
Header header
uint16 m0
uint16 m1
uint16 m2
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','m0','m1','m2']
_slot_types = ['std_msgs/Header','uint16','uint16','uint16']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,m0,m1,m2
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(NoiseLevels, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.m0 is None:
self.m0 = 0
if self.m1 is None:
self.m1 = 0
if self.m2 is None:
self.m2 = 0
else:
self.header = std_msgs.msg.Header()
self.m0 = 0
self.m1 = 0
self.m2 = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3H.pack(_x.m0, _x.m1, _x.m2))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 6
(_x.m0, _x.m1, _x.m2,) = _struct_3H.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3H.pack(_x.m0, _x.m1, _x.m2))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 6
(_x.m0, _x.m1, _x.m2,) = _struct_3H.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_3H = struct.Struct("<3H")
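# Illustrative round trip (this module is genpy-generated Python 2 code, so the
# buffer type below follows the serialize() docstring):
#   from StringIO import StringIO
#   msg = NoiseLevels(m0=1, m1=2, m2=3)
#   buff = StringIO()
#   msg.serialize(buff)
#   copy = NoiseLevels()
#   copy.deserialize(buff.getvalue())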
| HailStorm32/Q.bo_stacks | qbo_arduqbo/src/qbo_arduqbo/msg/_NoiseLevels.py | Python | lgpl-2.1 | 7,510 |
"""
This file shows how to use pyDatalog using facts stored in python objects.
It has 3 parts :
1. define python class and business rules
2. create python objects for 2 employees
3. Query the objects using the datalog engine
"""
from pyDatalog import pyDatalog
""" 1. define python class and business rules """
class Employee(pyDatalog.Mixin): # --> Employee inherits the pyDatalog capability to use logic clauses
def __init__(self, name, manager, salary): # method to initialize Employee instances
super(Employee, self).__init__() # calls the initialization method of the Mixin class
self.name = name
self.manager = manager # direct manager of the employee, or None
self.salary = salary # monthly salary of the employee
def __repr__(self): # specifies how to display an Employee
return self.name
@pyDatalog.program() # indicates that the following method contains pyDatalog clauses
def Employee(self):
# the salary class N of employee X is computed as a function of his/her salary
# this statement is a logic equality, not an assignment !
Employee.salary_class[X] = Employee.salary[X]//1000
# all the indirect managers Y of employee X are derived from his manager, recursively
Employee.indirect_manager(X,Y) <= (Employee.manager[X]==Y) & (Y != None)
Employee.indirect_manager(X,Y) <= (Employee.manager[X]==Z) & Employee.indirect_manager(Z,Y) & (Y != None)
# count the number of reports of X
(Employee.report_count[X] == len(Y)) <= Employee.indirect_manager(Y,X)
""" 2. create python objects for 3 employees """
# John is the manager of Mary, who is the manager of Sam
John = Employee('John', None, 6800)
Mary = Employee('Mary', John, 6300)
Sam = Employee('Sam', Mary, 5900)
""" 3. Query the objects using the datalog engine """
# the following python statements implicitly use the datalog clauses in the class definition
# What is the salary class of John ?
print(John.salary_class) # prints 6
# who has a salary of 6300 ?
pyDatalog.create_terms('X')
Employee.salary[X] == 6300 # notice the similarity to a pyDatalog query
print(X) # prints [Mary]
print(X.v()) # prints Mary
# who are the indirect managers of Mary ?
Employee.indirect_manager(Mary, X)
print(X) # prints [John]
# Who are the employees of John with a salary below 6000 ?
result = (Employee.salary[X] < 6000) & Employee.indirect_manager(X, John)
print(result) # Sam is in the result
print(X) # prints [Sam]
print((Employee.salary_class[X] == 5) & Employee.indirect_manager(X, John) >= X) # Sam
# verify that the manager of Mary is John
assert Employee.manager[Mary]==John
# who is his own indirect manager ?
Employee.indirect_manager(X, X)
print(X) # prints []
# who has 2 reports ?
Employee.report_count[X] == 2
print(X) # prints [John]
# what is the total salary of the employees of John ?
# note : it is better to place aggregation clauses in the class definition
pyDatalog.load("(Employee.budget[X] == sum(N, for_each=Y)) <= (Employee.indirect_manager(Y, X)) & (Employee.salary[Y]==N)")
Employee.budget[John]==X
print(X) # prints [12200]
# who has the lowest salary ?
pyDatalog.load("(lowest[1] == min(X, order_by=N)) <= (Employee.salary[X]==N)")
# must use ask() because inline queries cannot use unprefixed literals
print(pyDatalog.ask("lowest[1]==X")) # prints set([(1, 'Sam')])
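# Extra queries in the same style (illustrative additions, not part of the
# original example):
# how many direct or indirect reports does Mary have ?
Employee.report_count[Mary] == X
print(X) # prints [1] : only Sam reports to Mary
# what is the total salary of the employees of Mary ?
Employee.budget[Mary] == X
print(X) # prints [5900]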
| pcarbonn/pyDatalog | pyDatalog/examples/python.py | Python | lgpl-2.1 | 3,454 |
"""
ECE 4564
Final Project
Team: Immortal
Title: HomeGuard - Home Visitors Detection and Alert System
Filename: publisher.py
Members: Arun Rai, Mohammad Islam, and Yihan Pang
Date: 11/26/2014
---------------------------------------------------------------------
Description:
1. Receive host user information and send it to the subscriber.
2. Receive visitors' messages and send them to the subscriber.
3. Receive sensor readings and send a camera trigger signal to the
   subscriber.
Network protocols: TCP/IP and AMQP
---------------------------------------------------------------------
"""
import threading
from infoSetup import infoSetup
from getSensorData import getSensorData
from setVisitorMessage import setVisitorMessage
import signal
import socket
""" Default host IP address and port number """
HOST = "127.0.0.1"
PORT = 9000
class HostInformation:
    def __init__(self):
        self.senderNumber = ''
        self.receiverNumber = ''
        self.receiverEmail = '[email protected]'
        # Sender credentials: initialized here so that getSenderEmail() and
        # getSenderEmailPass() below cannot raise AttributeError.
        self.senderEmail = ''
        self.senderPassword = ''
        self.loop = True
        self.s = ''
        self.emailOnly = True
        self.smsOnly = False
        self.both = False
        self.Message = {}
        self.msgSignal = False
""" Set receiver's phone number """
def setPhoneNumber(self, number):
self.receiverNumber = number;
self.smsOnly = True;
self.emailOnly = False;
self.both = False;
""" Set receiver's email id """
def setEmail(self, email):
self.receiverEmail = email;
self.emailOnly = True;
self.smsOnly = False;
self.both = False;
""" Set both email and phone nubmer """
def setBoth(self, email, phone):
self.receiverEmail = email;
self.receiverNumber = phone;
self.both = True;
self.emailOnly = False;
self.smsOnly = False;
    def getReceiverNumber(self):
        return self.receiverNumber
    def getReceiverEmail(self):
        return self.receiverEmail
    def getSenderEmail(self):
        return self.senderEmail
    def getSenderEmailPass(self):
        return self.senderPassword
    def messageInEmail(self):
        return self.emailOnly
    def messageInSms(self):
        return self.smsOnly
    def messageInBoth(self):
        return self.both
    def setLoopState(self, signal=None, frame=None):
        print 'Gracefully closing the socket .................'
        self.loop = False
        self.s.close()
    """ Here, the message is of type dictionary """
    def setMessage(self, message):
        self.Message = message
    def getMessage(self):
        return self.Message
    def setMessageSignal(self, sig):
        self.msgSignal = sig
    def getMessageSignal(self):
        return self.msgSignal
    def getLoopState(self):
        return self.loop
    """ Stores the listening socket so that setLoopState() can close it
    gracefully when the user presses Ctrl+C, before the program exits. """
    def closeSocket(self, s):
        self.s = s
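""" Illustrative helper (an addition, not in the original file): sends one
message to this publisher over TCP. main() routes payloads that start with
'$$$' to setVisitorMessage(); anything else goes to infoSetup(). """
def sendTestMessage(payload, host=HOST, port=PORT):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    c.send(payload)
    c.close()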
def main():
    setHostInfo = HostInformation()
    """ Sensor thread: sensor reading is performed """
    sensorThread = threading.Thread(target = getSensorData, args = [setHostInfo,])
    """ start the thread """
    sensorThread.start()
    """ Setup signal handlers to shutdown this app when SIGINT
    or SIGTERM is sent to this app """
    signal_num = signal.SIGINT
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((HOST, PORT))
        s.listen(1)
        """ main accept loop """
        while setHostInfo.getLoopState():
            try:
                setHostInfo.closeSocket(s)
                signal.signal(signal_num, setHostInfo.setLoopState)
                signal_num = signal.SIGTERM
                signal.signal(signal_num, setHostInfo.setLoopState)
            except ValueError as error1:
                print "Warning: graceful shutdown may not be possible; unsupported"
                print "signal: " + str(signal_num)
            conn, addr = s.accept()
            message = conn.recv(1024)
            if len(message) > 3:
                if message.startswith('$$$'):
                    setVisitorMessage(setHostInfo, message)
                else:
                    infoSetup(setHostInfo, message)
    except socket.error, se:
        print 'connection failed/socket closed. \n', se
    if s:
        s.close()
    sensorThread.join()
if __name__ == '__main__':
    main()
| raiarun/HomeGuard | publisher.py | Python | lgpl-2.1 | 4,016 |
#!/usr/bin/env python
#
# Generated Thu Jul 22 14:11:34 2010 by generateDS.py.
#
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
s1 = inStr
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('"', '"')
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
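# Illustration (added): a "simple" mixed-content item renders as an inline
# element when exported, e.g.
#   mc = MixedContainer(MixedContainer.CategorySimple,
#                       MixedContainer.TypeInteger, 'count', 3)
#   mc.export(sys.stdout, 0, 'count')   # writes <count>3</count>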
#
# Data representation classes.
#
class GenerateModel:
subclass = None
def __init__(self, Module=None, PythonExport=None):
if Module is None:
self.Module = []
else:
self.Module = Module
if PythonExport is None:
self.PythonExport = []
else:
self.PythonExport = PythonExport
def factory(*args_, **kwargs_):
if GenerateModel.subclass:
return GenerateModel.subclass(*args_, **kwargs_)
else:
return GenerateModel(*args_, **kwargs_)
factory = staticmethod(factory)
def getModule(self): return self.Module
def setModule(self, Module): self.Module = Module
def addModule(self, value): self.Module.append(value)
def insertModule(self, index, value): self.Module[index] = value
def getPythonexport(self): return self.PythonExport
def setPythonexport(self, PythonExport): self.PythonExport = PythonExport
def addPythonexport(self, value): self.PythonExport.append(value)
def insertPythonexport(self, index, value): self.PythonExport[index] = value
def export(self, outfile, level, name_='GenerateModel'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='GenerateModel'):
pass
def exportChildren(self, outfile, level, name_='GenerateModel'):
for Module_ in self.getModule():
Module_.export(outfile, level)
for PythonExport_ in self.getPythonexport():
PythonExport_.export(outfile, level)
def exportLiteral(self, outfile, level, name_='GenerateModel'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Module=[\n')
level += 1
for Module in self.Module:
showIndent(outfile, level)
outfile.write('Module(\n')
Module.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('PythonExport=[\n')
level += 1
for PythonExport in self.PythonExport:
showIndent(outfile, level)
outfile.write('PythonExport(\n')
PythonExport.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Module':
obj_ = Module.factory()
obj_.build(child_)
self.Module.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'PythonExport':
obj_ = PythonExport.factory()
obj_.build(child_)
self.PythonExport.append(obj_)
# end class GenerateModel
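# Parsing sketch (added; mirrors the usual generateDS driver code, and the
# element content here is hypothetical):
#   doc = minidom.parseString(
#       '<GenerateModel><Module Name="Part"/></GenerateModel>')
#   root = GenerateModel.factory()
#   root.build(doc.documentElement)
#   root.export(sys.stdout, 0)   # re-emits the document, indented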
class PythonExport:
subclass = None
def __init__(self, FatherNamespace='', RichCompare=0, Name='', Reference=0, FatherInclude='', Father='', Namespace='', Twin='', Constructor=0, TwinPointer='', Include='', NumberProtocol=0, Delete=0, Documentation=None, Methode=None, Attribute=None, Sequence=None, CustomAttributes='', ClassDeclarations='', Initialization=0):
self.FatherNamespace = FatherNamespace
self.RichCompare = RichCompare
self.Name = Name
self.Reference = Reference
self.FatherInclude = FatherInclude
self.Father = Father
self.Namespace = Namespace
self.Twin = Twin
self.Constructor = Constructor
self.TwinPointer = TwinPointer
self.Include = Include
self.NumberProtocol = NumberProtocol
self.Delete = Delete
self.Documentation = Documentation
self.Initialization = Initialization
if Methode is None:
self.Methode = []
else:
self.Methode = Methode
if Attribute is None:
self.Attribute = []
else:
self.Attribute = Attribute
self.Sequence = Sequence
self.CustomAttributes = CustomAttributes
self.ClassDeclarations = ClassDeclarations
def factory(*args_, **kwargs_):
if PythonExport.subclass:
return PythonExport.subclass(*args_, **kwargs_)
else:
return PythonExport(*args_, **kwargs_)
factory = staticmethod(factory)
def getInitialization(self): return self.Initialization
def setInitialization(self, Initialization): self.Initialization = Initialization
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getMethode(self): return self.Methode
def setMethode(self, Methode): self.Methode = Methode
def addMethode(self, value): self.Methode.append(value)
def insertMethode(self, index, value): self.Methode[index] = value
def getAttribute(self): return self.Attribute
def setAttribute(self, Attribute): self.Attribute = Attribute
def addAttribute(self, value): self.Attribute.append(value)
def insertAttribute(self, index, value): self.Attribute[index] = value
def getSequence(self): return self.Sequence
def setSequence(self, Sequence): self.Sequence = Sequence
def getCustomattributes(self): return self.CustomAttributes
def setCustomattributes(self, CustomAttributes): self.CustomAttributes = CustomAttributes
def getClassdeclarations(self): return self.ClassDeclarations
def setClassdeclarations(self, ClassDeclarations): self.ClassDeclarations = ClassDeclarations
def getFathernamespace(self): return self.FatherNamespace
def setFathernamespace(self, FatherNamespace): self.FatherNamespace = FatherNamespace
def getRichcompare(self): return self.RichCompare
def setRichcompare(self, RichCompare): self.RichCompare = RichCompare
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def getReference(self): return self.Reference
def setReference(self, Reference): self.Reference = Reference
def getFatherinclude(self): return self.FatherInclude
def setFatherinclude(self, FatherInclude): self.FatherInclude = FatherInclude
def getFather(self): return self.Father
def setFather(self, Father): self.Father = Father
def getNamespace(self): return self.Namespace
def setNamespace(self, Namespace): self.Namespace = Namespace
def getTwin(self): return self.Twin
def setTwin(self, Twin): self.Twin = Twin
def getConstructor(self): return self.Constructor
def setConstructor(self, Constructor): self.Constructor = Constructor
def getTwinpointer(self): return self.TwinPointer
def setTwinpointer(self, TwinPointer): self.TwinPointer = TwinPointer
def getInclude(self): return self.Include
def setInclude(self, Include): self.Include = Include
def getNumberprotocol(self): return self.NumberProtocol
def setNumberprotocol(self, NumberProtocol): self.NumberProtocol = NumberProtocol
def getDelete(self): return self.Delete
def setDelete(self, Delete): self.Delete = Delete
def export(self, outfile, level, name_='PythonExport'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='PythonExport')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='PythonExport'):
outfile.write(' FatherNamespace="%s"' % (self.getFathernamespace(), ))
if self.getRichcompare() is not None:
outfile.write(' RichCompare="%s"' % (self.getRichcompare(), ))
outfile.write(' Name="%s"' % (self.getName(), ))
if self.getReference() is not None:
outfile.write(' Reference="%s"' % (self.getReference(), ))
outfile.write(' FatherInclude="%s"' % (self.getFatherinclude(), ))
outfile.write(' Father="%s"' % (self.getFather(), ))
outfile.write(' Namespace="%s"' % (self.getNamespace(), ))
outfile.write(' Twin="%s"' % (self.getTwin(), ))
if self.getConstructor() is not None:
outfile.write(' Constructor="%s"' % (self.getConstructor(), ))
if self.getInitialization() is not None:
outfile.write(' Initialization="%s"' % (self.getInitialization(), ))
outfile.write(' TwinPointer="%s"' % (self.getTwinpointer(), ))
outfile.write(' Include="%s"' % (self.getInclude(), ))
if self.getNumberprotocol() is not None:
outfile.write(' NumberProtocol="%s"' % (self.getNumberprotocol(), ))
if self.getDelete() is not None:
outfile.write(' Delete="%s"' % (self.getDelete(), ))
def exportChildren(self, outfile, level, name_='PythonExport'):
if self.Documentation:
self.Documentation.export(outfile, level)
for Methode_ in self.getMethode():
Methode_.export(outfile, level)
for Attribute_ in self.getAttribute():
Attribute_.export(outfile, level)
if self.Sequence:
self.Sequence.export(outfile, level)
showIndent(outfile, level)
outfile.write('<CustomAttributes>%s</CustomAttributes>\n' % quote_xml(self.getCustomattributes()))
showIndent(outfile, level)
outfile.write('<ClassDeclarations>%s</ClassDeclarations>\n' % quote_xml(self.getClassdeclarations()))
def exportLiteral(self, outfile, level, name_='PythonExport'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('FatherNamespace = "%s",\n' % (self.getFathernamespace(),))
showIndent(outfile, level)
outfile.write('RichCompare = "%s",\n' % (self.getRichcompare(),))
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
showIndent(outfile, level)
outfile.write('Reference = "%s",\n' % (self.getReference(),))
showIndent(outfile, level)
outfile.write('FatherInclude = "%s",\n' % (self.getFatherinclude(),))
showIndent(outfile, level)
outfile.write('Father = "%s",\n' % (self.getFather(),))
showIndent(outfile, level)
outfile.write('Namespace = "%s",\n' % (self.getNamespace(),))
showIndent(outfile, level)
outfile.write('Twin = "%s",\n' % (self.getTwin(),))
showIndent(outfile, level)
outfile.write('Constructor = "%s",\n' % (self.getConstructor(),))
showIndent(outfile, level)
        outfile.write('Initialization = "%s",\n' % (self.getInitialization(),))
        showIndent(outfile, level)
        outfile.write('TwinPointer = "%s",\n' % (self.getTwinpointer(),))
showIndent(outfile, level)
outfile.write('Include = "%s",\n' % (self.getInclude(),))
showIndent(outfile, level)
outfile.write('NumberProtocol = "%s",\n' % (self.getNumberprotocol(),))
showIndent(outfile, level)
outfile.write('Delete = "%s",\n' % (self.getDelete(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Methode=[\n')
level += 1
for Methode in self.Methode:
showIndent(outfile, level)
outfile.write('Methode(\n')
Methode.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Attribute=[\n')
level += 1
for Attribute in self.Attribute:
showIndent(outfile, level)
outfile.write('Attribute(\n')
Attribute.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.Sequence:
showIndent(outfile, level)
outfile.write('Sequence=Sequence(\n')
self.Sequence.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('CustomAttributes=%s,\n' % quote_python(self.getCustomattributes()))
showIndent(outfile, level)
outfile.write('ClassDeclarations=%s,\n' % quote_python(self.getClassdeclarations()))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('FatherNamespace'):
self.FatherNamespace = attrs.get('FatherNamespace').value
if attrs.get('RichCompare'):
if attrs.get('RichCompare').value in ('true', '1'):
self.RichCompare = 1
elif attrs.get('RichCompare').value in ('false', '0'):
self.RichCompare = 0
else:
raise ValueError('Bad boolean attribute (RichCompare)')
if attrs.get('Name'):
self.Name = attrs.get('Name').value
if attrs.get('Reference'):
if attrs.get('Reference').value in ('true', '1'):
self.Reference = 1
elif attrs.get('Reference').value in ('false', '0'):
self.Reference = 0
else:
raise ValueError('Bad boolean attribute (Reference)')
if attrs.get('FatherInclude'):
self.FatherInclude = attrs.get('FatherInclude').value
if attrs.get('Father'):
self.Father = attrs.get('Father').value
if attrs.get('Namespace'):
self.Namespace = attrs.get('Namespace').value
if attrs.get('Twin'):
self.Twin = attrs.get('Twin').value
if attrs.get('Constructor'):
if attrs.get('Constructor').value in ('true', '1'):
self.Constructor = 1
elif attrs.get('Constructor').value in ('false', '0'):
self.Constructor = 0
else:
raise ValueError('Bad boolean attribute (Constructor)')
if attrs.get('Initialization'):
if attrs.get('Initialization').value in ('true', '1'):
self.Initialization = 1
elif attrs.get('Initialization').value in ('false', '0'):
self.Initialization = 0
else:
raise ValueError('Bad boolean attribute (Initialization)')
if attrs.get('TwinPointer'):
self.TwinPointer = attrs.get('TwinPointer').value
if attrs.get('Include'):
self.Include = attrs.get('Include').value
if attrs.get('NumberProtocol'):
if attrs.get('NumberProtocol').value in ('true', '1'):
self.NumberProtocol = 1
elif attrs.get('NumberProtocol').value in ('false', '0'):
self.NumberProtocol = 0
else:
raise ValueError('Bad boolean attribute (NumberProtocol)')
if attrs.get('Delete'):
if attrs.get('Delete').value in ('true', '1'):
self.Delete = 1
elif attrs.get('Delete').value in ('false', '0'):
self.Delete = 0
else:
raise ValueError('Bad boolean attribute (Delete)')
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Methode':
obj_ = Methode.factory()
obj_.build(child_)
self.Methode.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Attribute':
obj_ = Attribute.factory()
obj_.build(child_)
self.Attribute.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Sequence':
obj_ = Sequence.factory()
obj_.build(child_)
self.setSequence(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'CustomAttributes':
CustomAttributes_ = ''
for text__content_ in child_.childNodes:
CustomAttributes_ += text__content_.nodeValue
self.CustomAttributes = CustomAttributes_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ClassDeclarations':
ClassDeclarations_ = ''
for text__content_ in child_.childNodes:
ClassDeclarations_ += text__content_.nodeValue
self.ClassDeclarations = ClassDeclarations_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Initialization':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
# end class PythonExport
class Methode:
subclass = None
def __init__(self, Const=0, Name='', Keyword=0, Documentation=None, Parameter=None):
self.Const = Const
self.Name = Name
self.Keyword = Keyword
self.Documentation = Documentation
if Parameter is None:
self.Parameter = []
else:
self.Parameter = Parameter
def factory(*args_, **kwargs_):
if Methode.subclass:
return Methode.subclass(*args_, **kwargs_)
else:
return Methode(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getParameter(self): return self.Parameter
def setParameter(self, Parameter): self.Parameter = Parameter
def addParameter(self, value): self.Parameter.append(value)
def insertParameter(self, index, value): self.Parameter[index] = value
def getConst(self): return self.Const
def setConst(self, Const): self.Const = Const
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def getKeyword(self): return self.Keyword
def setKeyword(self, Keyword): self.Keyword = Keyword
def export(self, outfile, level, name_='Methode'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Methode')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Methode'):
if self.getConst() is not None:
outfile.write(' Const="%s"' % (self.getConst(), ))
outfile.write(' Name="%s"' % (self.getName(), ))
if self.getKeyword() is not None:
outfile.write(' Keyword="%s"' % (self.getKeyword(), ))
def exportChildren(self, outfile, level, name_='Methode'):
if self.Documentation:
self.Documentation.export(outfile, level)
for Parameter_ in self.getParameter():
Parameter_.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Methode'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Const = "%s",\n' % (self.getConst(),))
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
showIndent(outfile, level)
outfile.write('Keyword = "%s",\n' % (self.getKeyword(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Parameter=[\n')
level += 1
for Parameter in self.Parameter:
showIndent(outfile, level)
outfile.write('Parameter(\n')
Parameter.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Const'):
if attrs.get('Const').value in ('true', '1'):
self.Const = 1
elif attrs.get('Const').value in ('false', '0'):
self.Const = 0
else:
raise ValueError('Bad boolean attribute (Const)')
if attrs.get('Name'):
self.Name = attrs.get('Name').value
if attrs.get('Keyword'):
if attrs.get('Keyword').value in ('true', '1'):
self.Keyword = 1
elif attrs.get('Keyword').value in ('false', '0'):
self.Keyword = 0
else:
raise ValueError('Bad boolean attribute (Keyword)')
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Parameter':
obj_ = Parameter.factory()
obj_.build(child_)
self.Parameter.append(obj_)
# end class Methode
class Attribute:
subclass = None
def __init__(self, ReadOnly=0, Name='', Documentation=None, Parameter=None):
self.ReadOnly = ReadOnly
self.Name = Name
self.Documentation = Documentation
self.Parameter = Parameter
def factory(*args_, **kwargs_):
if Attribute.subclass:
return Attribute.subclass(*args_, **kwargs_)
else:
return Attribute(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getParameter(self): return self.Parameter
def setParameter(self, Parameter): self.Parameter = Parameter
def getReadonly(self): return self.ReadOnly
def setReadonly(self, ReadOnly): self.ReadOnly = ReadOnly
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def export(self, outfile, level, name_='Attribute'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Attribute')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Attribute'):
outfile.write(' ReadOnly="%s"' % (self.getReadonly(), ))
outfile.write(' Name="%s"' % (self.getName(), ))
def exportChildren(self, outfile, level, name_='Attribute'):
if self.Documentation:
self.Documentation.export(outfile, level)
if self.Parameter:
self.Parameter.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Attribute'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('ReadOnly = "%s",\n' % (self.getReadonly(),))
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Parameter:
showIndent(outfile, level)
outfile.write('Parameter=Parameter(\n')
self.Parameter.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('ReadOnly'):
if attrs.get('ReadOnly').value in ('true', '1'):
self.ReadOnly = 1
elif attrs.get('ReadOnly').value in ('false', '0'):
self.ReadOnly = 0
else:
raise ValueError('Bad boolean attribute (ReadOnly)')
if attrs.get('Name'):
self.Name = attrs.get('Name').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Parameter':
obj_ = Parameter.factory()
obj_.build(child_)
self.setParameter(obj_)
# end class Attribute
class Sequence:
subclass = None
def __init__(self, sq_slice=0, sq_item=0, sq_concat=0, sq_inplace_repeat=0, sq_ass_slice=0, sq_contains=0, sq_ass_item=0, sq_repeat=0, sq_length=0, sq_inplace_concat=0, valueOf_=''):
self.sq_slice = sq_slice
self.sq_item = sq_item
self.sq_concat = sq_concat
self.sq_inplace_repeat = sq_inplace_repeat
self.sq_ass_slice = sq_ass_slice
self.sq_contains = sq_contains
self.sq_ass_item = sq_ass_item
self.sq_repeat = sq_repeat
self.sq_length = sq_length
self.sq_inplace_concat = sq_inplace_concat
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if Sequence.subclass:
return Sequence.subclass(*args_, **kwargs_)
else:
return Sequence(*args_, **kwargs_)
factory = staticmethod(factory)
def getSq_slice(self): return self.sq_slice
def setSq_slice(self, sq_slice): self.sq_slice = sq_slice
def getSq_item(self): return self.sq_item
def setSq_item(self, sq_item): self.sq_item = sq_item
def getSq_concat(self): return self.sq_concat
def setSq_concat(self, sq_concat): self.sq_concat = sq_concat
def getSq_inplace_repeat(self): return self.sq_inplace_repeat
def setSq_inplace_repeat(self, sq_inplace_repeat): self.sq_inplace_repeat = sq_inplace_repeat
def getSq_ass_slice(self): return self.sq_ass_slice
def setSq_ass_slice(self, sq_ass_slice): self.sq_ass_slice = sq_ass_slice
def getSq_contains(self): return self.sq_contains
def setSq_contains(self, sq_contains): self.sq_contains = sq_contains
def getSq_ass_item(self): return self.sq_ass_item
def setSq_ass_item(self, sq_ass_item): self.sq_ass_item = sq_ass_item
def getSq_repeat(self): return self.sq_repeat
def setSq_repeat(self, sq_repeat): self.sq_repeat = sq_repeat
def getSq_length(self): return self.sq_length
def setSq_length(self, sq_length): self.sq_length = sq_length
def getSq_inplace_concat(self): return self.sq_inplace_concat
def setSq_inplace_concat(self, sq_inplace_concat): self.sq_inplace_concat = sq_inplace_concat
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, name_='Sequence'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Sequence')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Sequence'):
outfile.write(' sq_slice="%s"' % (self.getSq_slice(), ))
outfile.write(' sq_item="%s"' % (self.getSq_item(), ))
outfile.write(' sq_concat="%s"' % (self.getSq_concat(), ))
outfile.write(' sq_inplace_repeat="%s"' % (self.getSq_inplace_repeat(), ))
outfile.write(' sq_ass_slice="%s"' % (self.getSq_ass_slice(), ))
outfile.write(' sq_contains="%s"' % (self.getSq_contains(), ))
outfile.write(' sq_ass_item="%s"' % (self.getSq_ass_item(), ))
outfile.write(' sq_repeat="%s"' % (self.getSq_repeat(), ))
outfile.write(' sq_length="%s"' % (self.getSq_length(), ))
outfile.write(' sq_inplace_concat="%s"' % (self.getSq_inplace_concat(), ))
def exportChildren(self, outfile, level, name_='Sequence'):
outfile.write(self.valueOf_)
def exportLiteral(self, outfile, level, name_='Sequence'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('sq_slice = "%s",\n' % (self.getSq_slice(),))
showIndent(outfile, level)
outfile.write('sq_item = "%s",\n' % (self.getSq_item(),))
showIndent(outfile, level)
outfile.write('sq_concat = "%s",\n' % (self.getSq_concat(),))
showIndent(outfile, level)
outfile.write('sq_inplace_repeat = "%s",\n' % (self.getSq_inplace_repeat(),))
showIndent(outfile, level)
outfile.write('sq_ass_slice = "%s",\n' % (self.getSq_ass_slice(),))
showIndent(outfile, level)
outfile.write('sq_contains = "%s",\n' % (self.getSq_contains(),))
showIndent(outfile, level)
outfile.write('sq_ass_item = "%s",\n' % (self.getSq_ass_item(),))
showIndent(outfile, level)
outfile.write('sq_repeat = "%s",\n' % (self.getSq_repeat(),))
showIndent(outfile, level)
outfile.write('sq_length = "%s",\n' % (self.getSq_length(),))
showIndent(outfile, level)
outfile.write('sq_inplace_concat = "%s",\n' % (self.getSq_inplace_concat(),))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('sq_slice'):
if attrs.get('sq_slice').value in ('true', '1'):
self.sq_slice = 1
elif attrs.get('sq_slice').value in ('false', '0'):
self.sq_slice = 0
else:
raise ValueError('Bad boolean attribute (sq_slice)')
if attrs.get('sq_item'):
if attrs.get('sq_item').value in ('true', '1'):
self.sq_item = 1
elif attrs.get('sq_item').value in ('false', '0'):
self.sq_item = 0
else:
raise ValueError('Bad boolean attribute (sq_item)')
if attrs.get('sq_concat'):
if attrs.get('sq_concat').value in ('true', '1'):
self.sq_concat = 1
elif attrs.get('sq_concat').value in ('false', '0'):
self.sq_concat = 0
else:
raise ValueError('Bad boolean attribute (sq_concat)')
if attrs.get('sq_inplace_repeat'):
if attrs.get('sq_inplace_repeat').value in ('true', '1'):
self.sq_inplace_repeat = 1
elif attrs.get('sq_inplace_repeat').value in ('false', '0'):
self.sq_inplace_repeat = 0
else:
raise ValueError('Bad boolean attribute (sq_inplace_repeat)')
if attrs.get('sq_ass_slice'):
if attrs.get('sq_ass_slice').value in ('true', '1'):
self.sq_ass_slice = 1
elif attrs.get('sq_ass_slice').value in ('false', '0'):
self.sq_ass_slice = 0
else:
raise ValueError('Bad boolean attribute (sq_ass_slice)')
if attrs.get('sq_contains'):
if attrs.get('sq_contains').value in ('true', '1'):
self.sq_contains = 1
elif attrs.get('sq_contains').value in ('false', '0'):
self.sq_contains = 0
else:
raise ValueError('Bad boolean attribute (sq_contains)')
if attrs.get('sq_ass_item'):
if attrs.get('sq_ass_item').value in ('true', '1'):
self.sq_ass_item = 1
elif attrs.get('sq_ass_item').value in ('false', '0'):
self.sq_ass_item = 0
else:
raise ValueError('Bad boolean attribute (sq_ass_item)')
if attrs.get('sq_repeat'):
if attrs.get('sq_repeat').value in ('true', '1'):
self.sq_repeat = 1
elif attrs.get('sq_repeat').value in ('false', '0'):
self.sq_repeat = 0
else:
raise ValueError('Bad boolean attribute (sq_repeat)')
if attrs.get('sq_length'):
if attrs.get('sq_length').value in ('true', '1'):
self.sq_length = 1
elif attrs.get('sq_length').value in ('false', '0'):
self.sq_length = 0
else:
raise ValueError('Bad boolean attribute (sq_length)')
if attrs.get('sq_inplace_concat'):
if attrs.get('sq_inplace_concat').value in ('true', '1'):
self.sq_inplace_concat = 1
elif attrs.get('sq_inplace_concat').value in ('false', '0'):
self.sq_inplace_concat = 0
else:
raise ValueError('Bad boolean attribute (sq_inplace_concat)')
def buildChildren(self, child_, nodeName_):
self.valueOf_ = ''
for child in child_.childNodes:
if child.nodeType == Node.TEXT_NODE:
self.valueOf_ += child.nodeValue
# end class Sequence
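# Note (added): every sq_* flag above is parsed as an XML boolean attribute
# accepting 'true'/'1' or 'false'/'0'.  For example
#   <Sequence sq_length="true" sq_item="1"/>
# sets sq_length = 1 and sq_item = 1; any other value raises ValueError.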
class Module:
subclass = None
def __init__(self, Name='', Documentation=None, Dependencies=None, Content=None):
self.Name = Name
self.Documentation = Documentation
self.Dependencies = Dependencies
self.Content = Content
def factory(*args_, **kwargs_):
if Module.subclass:
return Module.subclass(*args_, **kwargs_)
else:
return Module(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getDependencies(self): return self.Dependencies
def setDependencies(self, Dependencies): self.Dependencies = Dependencies
def getContent(self): return self.Content
def setContent(self, Content): self.Content = Content
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def export(self, outfile, level, name_='Module'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Module')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Module'):
outfile.write(' Name="%s"' % (self.getName(), ))
def exportChildren(self, outfile, level, name_='Module'):
if self.Documentation:
self.Documentation.export(outfile, level)
if self.Dependencies:
self.Dependencies.export(outfile, level)
if self.Content:
self.Content.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Module'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Dependencies:
showIndent(outfile, level)
outfile.write('Dependencies=Dependencies(\n')
self.Dependencies.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Content:
showIndent(outfile, level)
outfile.write('Content=Content(\n')
self.Content.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Name'):
self.Name = attrs.get('Name').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Dependencies':
obj_ = Dependencies.factory()
obj_.build(child_)
self.setDependencies(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Content':
obj_ = Content.factory()
obj_.build(child_)
self.setContent(obj_)
# end class Module
class Dependencies:
subclass = None
def __init__(self, Module=None):
if Module is None:
self.Module = []
else:
self.Module = Module
def factory(*args_, **kwargs_):
if Dependencies.subclass:
return Dependencies.subclass(*args_, **kwargs_)
else:
return Dependencies(*args_, **kwargs_)
factory = staticmethod(factory)
def getModule(self): return self.Module
def setModule(self, Module): self.Module = Module
def addModule(self, value): self.Module.append(value)
def insertModule(self, index, value): self.Module[index] = value
def export(self, outfile, level, name_='Dependencies'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Dependencies'):
pass
def exportChildren(self, outfile, level, name_='Dependencies'):
for Module_ in self.getModule():
Module_.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Dependencies'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Module=[\n')
level += 1
for Module in self.Module:
showIndent(outfile, level)
outfile.write('Module(\n')
Module.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Module':
obj_ = Module.factory()
obj_.build(child_)
self.Module.append(obj_)
# end class Dependencies
class Content:
subclass = None
def __init__(self, Property=None, Feature=None, DocObject=None, GuiCommand=None, PreferencesPage=None):
if Property is None:
self.Property = []
else:
self.Property = Property
if Feature is None:
self.Feature = []
else:
self.Feature = Feature
if DocObject is None:
self.DocObject = []
else:
self.DocObject = DocObject
if GuiCommand is None:
self.GuiCommand = []
else:
self.GuiCommand = GuiCommand
if PreferencesPage is None:
self.PreferencesPage = []
else:
self.PreferencesPage = PreferencesPage
def factory(*args_, **kwargs_):
if Content.subclass:
return Content.subclass(*args_, **kwargs_)
else:
return Content(*args_, **kwargs_)
factory = staticmethod(factory)
def getProperty(self): return self.Property
def setProperty(self, Property): self.Property = Property
def addProperty(self, value): self.Property.append(value)
def insertProperty(self, index, value): self.Property[index] = value
def getFeature(self): return self.Feature
def setFeature(self, Feature): self.Feature = Feature
def addFeature(self, value): self.Feature.append(value)
def insertFeature(self, index, value): self.Feature[index] = value
def getDocobject(self): return self.DocObject
def setDocobject(self, DocObject): self.DocObject = DocObject
def addDocobject(self, value): self.DocObject.append(value)
def insertDocobject(self, index, value): self.DocObject[index] = value
def getGuicommand(self): return self.GuiCommand
def setGuicommand(self, GuiCommand): self.GuiCommand = GuiCommand
def addGuicommand(self, value): self.GuiCommand.append(value)
def insertGuicommand(self, index, value): self.GuiCommand[index] = value
def getPreferencespage(self): return self.PreferencesPage
def setPreferencespage(self, PreferencesPage): self.PreferencesPage = PreferencesPage
def addPreferencespage(self, value): self.PreferencesPage.append(value)
def insertPreferencespage(self, index, value): self.PreferencesPage[index] = value
def export(self, outfile, level, name_='Content'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Content'):
pass
def exportChildren(self, outfile, level, name_='Content'):
for Property_ in self.getProperty():
Property_.export(outfile, level)
for Feature_ in self.getFeature():
Feature_.export(outfile, level)
for DocObject_ in self.getDocobject():
DocObject_.export(outfile, level)
for GuiCommand_ in self.getGuicommand():
showIndent(outfile, level)
outfile.write('<GuiCommand>%s</GuiCommand>\n' % quote_xml(GuiCommand_))
for PreferencesPage_ in self.getPreferencespage():
showIndent(outfile, level)
outfile.write('<PreferencesPage>%s</PreferencesPage>\n' % quote_xml(PreferencesPage_))
def exportLiteral(self, outfile, level, name_='Content'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Property=[\n')
level += 1
for Property in self.Property:
showIndent(outfile, level)
outfile.write('Property(\n')
Property.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Feature=[\n')
level += 1
for Feature in self.Feature:
showIndent(outfile, level)
outfile.write('Feature(\n')
Feature.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('DocObject=[\n')
level += 1
for DocObject in self.DocObject:
showIndent(outfile, level)
outfile.write('DocObject(\n')
DocObject.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('GuiCommand=[\n')
level += 1
for GuiCommand in self.GuiCommand:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(GuiCommand))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('PreferencesPage=[\n')
level += 1
for PreferencesPage in self.PreferencesPage:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(PreferencesPage))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Property':
obj_ = Property.factory()
obj_.build(child_)
self.Property.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Feature':
obj_ = Feature.factory()
obj_.build(child_)
self.Feature.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'DocObject':
obj_ = DocObject.factory()
obj_.build(child_)
self.DocObject.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'GuiCommand':
GuiCommand_ = ''
for text__content_ in child_.childNodes:
GuiCommand_ += text__content_.nodeValue
self.GuiCommand.append(GuiCommand_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'PreferencesPage':
PreferencesPage_ = ''
for text__content_ in child_.childNodes:
PreferencesPage_ += text__content_.nodeValue
self.PreferencesPage.append(PreferencesPage_)
# end class Content
class Feature:
subclass = None
def __init__(self, Name='', Documentation=None, Property=None, ViewProvider=None):
self.Name = Name
self.Documentation = Documentation
if Property is None:
self.Property = []
else:
self.Property = Property
self.ViewProvider = ViewProvider
def factory(*args_, **kwargs_):
if Feature.subclass:
return Feature.subclass(*args_, **kwargs_)
else:
return Feature(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getProperty(self): return self.Property
def setProperty(self, Property): self.Property = Property
def addProperty(self, value): self.Property.append(value)
def insertProperty(self, index, value): self.Property[index] = value
def getViewprovider(self): return self.ViewProvider
def setViewprovider(self, ViewProvider): self.ViewProvider = ViewProvider
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def export(self, outfile, level, name_='Feature'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Feature')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Feature'):
outfile.write(' Name="%s"' % (self.getName(), ))
def exportChildren(self, outfile, level, name_='Feature'):
if self.Documentation:
self.Documentation.export(outfile, level)
for Property_ in self.getProperty():
Property_.export(outfile, level)
if self.ViewProvider:
self.ViewProvider.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Feature'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Property=[\n')
level += 1
for Property in self.Property:
showIndent(outfile, level)
outfile.write('Property(\n')
Property.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.ViewProvider:
showIndent(outfile, level)
outfile.write('ViewProvider=ViewProvider(\n')
self.ViewProvider.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Name'):
self.Name = attrs.get('Name').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Property':
obj_ = Property.factory()
obj_.build(child_)
self.Property.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'ViewProvider':
obj_ = ViewProvider.factory()
obj_.build(child_)
self.setViewprovider(obj_)
# end class Feature
class DocObject:
subclass = None
def __init__(self, Name='', Documentation=None, Property=None):
self.Name = Name
self.Documentation = Documentation
if Property is None:
self.Property = []
else:
self.Property = Property
def factory(*args_, **kwargs_):
if DocObject.subclass:
return DocObject.subclass(*args_, **kwargs_)
else:
return DocObject(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getProperty(self): return self.Property
def setProperty(self, Property): self.Property = Property
def addProperty(self, value): self.Property.append(value)
def insertProperty(self, index, value): self.Property[index] = value
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def export(self, outfile, level, name_='DocObject'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='DocObject')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='DocObject'):
outfile.write(' Name="%s"' % (self.getName(), ))
def exportChildren(self, outfile, level, name_='DocObject'):
if self.Documentation:
self.Documentation.export(outfile, level)
for Property_ in self.getProperty():
Property_.export(outfile, level)
def exportLiteral(self, outfile, level, name_='DocObject'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Property=[\n')
level += 1
for Property in self.Property:
showIndent(outfile, level)
outfile.write('Property(\n')
Property.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Name'):
self.Name = attrs.get('Name').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Property':
obj_ = Property.factory()
obj_.build(child_)
self.Property.append(obj_)
# end class DocObject
class Property:
subclass = None
def __init__(self, Type='', Name='', StartValue='', Documentation=None):
self.Type = Type
self.Name = Name
self.StartValue = StartValue
self.Documentation = Documentation
def factory(*args_, **kwargs_):
if Property.subclass:
return Property.subclass(*args_, **kwargs_)
else:
return Property(*args_, **kwargs_)
factory = staticmethod(factory)
def getDocumentation(self): return self.Documentation
def setDocumentation(self, Documentation): self.Documentation = Documentation
def getType(self): return self.Type
def setType(self, Type): self.Type = Type
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def getStartvalue(self): return self.StartValue
def setStartvalue(self, StartValue): self.StartValue = StartValue
def export(self, outfile, level, name_='Property'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Property')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Property'):
outfile.write(' Type="%s"' % (self.getType(), ))
outfile.write(' Name="%s"' % (self.getName(), ))
if self.getStartvalue() is not None:
outfile.write(' StartValue="%s"' % (self.getStartvalue(), ))
def exportChildren(self, outfile, level, name_='Property'):
if self.Documentation:
self.Documentation.export(outfile, level)
def exportLiteral(self, outfile, level, name_='Property'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Type = "%s",\n' % (self.getType(),))
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
showIndent(outfile, level)
outfile.write('StartValue = "%s",\n' % (self.getStartvalue(),))
def exportLiteralChildren(self, outfile, level, name_):
if self.Documentation:
showIndent(outfile, level)
outfile.write('Documentation=Documentation(\n')
self.Documentation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Type'):
self.Type = attrs.get('Type').value
if attrs.get('Name'):
self.Name = attrs.get('Name').value
if attrs.get('StartValue'):
self.StartValue = attrs.get('StartValue').value
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Documentation':
obj_ = Documentation.factory()
obj_.build(child_)
self.setDocumentation(obj_)
# end class Property
class Documentation:
subclass = None
def __init__(self, Author=None, DeveloperDocu='', UserDocu=''):
self.Author = Author
self.DeveloperDocu = DeveloperDocu
self.UserDocu = UserDocu
def factory(*args_, **kwargs_):
if Documentation.subclass:
return Documentation.subclass(*args_, **kwargs_)
else:
return Documentation(*args_, **kwargs_)
factory = staticmethod(factory)
def getAuthor(self): return self.Author
def setAuthor(self, Author): self.Author = Author
def getDeveloperdocu(self): return self.DeveloperDocu
def setDeveloperdocu(self, DeveloperDocu): self.DeveloperDocu = DeveloperDocu
def getUserdocu(self): return self.UserDocu
def setUserdocu(self, UserDocu): self.UserDocu = UserDocu
def export(self, outfile, level, name_='Documentation'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Documentation'):
pass
def exportChildren(self, outfile, level, name_='Documentation'):
if self.Author:
self.Author.export(outfile, level)
showIndent(outfile, level)
outfile.write('<DeveloperDocu>%s</DeveloperDocu>\n' % quote_xml(self.getDeveloperdocu()))
showIndent(outfile, level)
outfile.write('<UserDocu>%s</UserDocu>\n' % quote_xml(self.getUserdocu()))
def exportLiteral(self, outfile, level, name_='Documentation'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Author:
showIndent(outfile, level)
outfile.write('Author=Author(\n')
self.Author.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('DeveloperDocu=%s,\n' % quote_python(self.getDeveloperdocu()))
showIndent(outfile, level)
outfile.write('UserDocu=%s,\n' % quote_python(self.getUserdocu()))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Author':
obj_ = Author.factory()
obj_.build(child_)
self.setAuthor(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'DeveloperDocu':
DeveloperDocu_ = ''
for text__content_ in child_.childNodes:
DeveloperDocu_ += text__content_.nodeValue
self.DeveloperDocu = DeveloperDocu_
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'UserDocu':
UserDocu_ = ''
for text__content_ in child_.childNodes:
UserDocu_ += text__content_.nodeValue
self.UserDocu = UserDocu_
# end class Documentation
class Author:
subclass = None
def __init__(self, Name='', Licence='', EMail='', valueOf_=''):
self.Name = Name
self.Licence = Licence
self.EMail = EMail
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if Author.subclass:
return Author.subclass(*args_, **kwargs_)
else:
return Author(*args_, **kwargs_)
factory = staticmethod(factory)
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def getLicence(self): return self.Licence
def setLicence(self, Licence): self.Licence = Licence
def getEmail(self): return self.EMail
def setEmail(self, EMail): self.EMail = EMail
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, name_='Author'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Author')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Author'):
outfile.write(' Name="%s"' % (self.getName(), ))
if self.getLicence() is not None:
outfile.write(' Licence="%s"' % (self.getLicence(), ))
outfile.write(' EMail="%s"' % (self.getEmail(), ))
def exportChildren(self, outfile, level, name_='Author'):
outfile.write(self.valueOf_)
def exportLiteral(self, outfile, level, name_='Author'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
showIndent(outfile, level)
outfile.write('Licence = "%s",\n' % (self.getLicence(),))
showIndent(outfile, level)
outfile.write('EMail = "%s",\n' % (self.getEmail(),))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Name'):
self.Name = attrs.get('Name').value
if attrs.get('Licence'):
self.Licence = attrs.get('Licence').value
if attrs.get('EMail'):
self.EMail = attrs.get('EMail').value
def buildChildren(self, child_, nodeName_):
self.valueOf_ = ''
for child in child_.childNodes:
if child.nodeType == Node.TEXT_NODE:
self.valueOf_ += child.nodeValue
# end class Author
class ViewProvider:
subclass = None
def __init__(self, Property=None):
if Property is None:
self.Property = []
else:
self.Property = Property
def factory(*args_, **kwargs_):
if ViewProvider.subclass:
return ViewProvider.subclass(*args_, **kwargs_)
else:
return ViewProvider(*args_, **kwargs_)
factory = staticmethod(factory)
def getProperty(self): return self.Property
def setProperty(self, Property): self.Property = Property
def addProperty(self, value): self.Property.append(value)
def insertProperty(self, index, value): self.Property[index] = value
def export(self, outfile, level, name_='ViewProvider'):
showIndent(outfile, level)
outfile.write('<%s>\n' % name_)
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='ViewProvider'):
pass
def exportChildren(self, outfile, level, name_='ViewProvider'):
for Property_ in self.getProperty():
Property_.export(outfile, level)
def exportLiteral(self, outfile, level, name_='ViewProvider'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Property=[\n')
level += 1
for Property in self.Property:
showIndent(outfile, level)
outfile.write('Property(\n')
Property.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
pass
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'Property':
obj_ = Property.factory()
obj_.build(child_)
self.Property.append(obj_)
# end class ViewProvider
class Parameter:
subclass = None
def __init__(self, Type='', Name='', valueOf_=''):
self.Type = Type
self.Name = Name
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if Parameter.subclass:
return Parameter.subclass(*args_, **kwargs_)
else:
return Parameter(*args_, **kwargs_)
factory = staticmethod(factory)
def getType(self): return self.Type
def setType(self, Type): self.Type = Type
def getName(self): return self.Name
def setName(self, Name): self.Name = Name
def getValueOf_(self): return self.valueOf_
def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, name_='Parameter'):
showIndent(outfile, level)
outfile.write('<%s' % (name_, ))
self.exportAttributes(outfile, level, name_='Parameter')
outfile.write('>\n')
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write('</%s>\n' % name_)
def exportAttributes(self, outfile, level, name_='Parameter'):
outfile.write(' Type="%s"' % (self.getType(), ))
outfile.write(' Name="%s"' % (self.getName(), ))
def exportChildren(self, outfile, level, name_='Parameter'):
outfile.write(self.valueOf_)
def exportLiteral(self, outfile, level, name_='Parameter'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Type = "%s",\n' % (self.getType(),))
showIndent(outfile, level)
outfile.write('Name = "%s",\n' % (self.getName(),))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,))
def build(self, node_):
attrs = node_.attributes
self.buildAttributes(attrs)
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(':')[-1]
self.buildChildren(child_, nodeName_)
def buildAttributes(self, attrs):
if attrs.get('Type'):
self.Type = attrs.get('Type').value
if attrs.get('Name'):
self.Name = attrs.get('Name').value
def buildChildren(self, child_, nodeName_):
self.valueOf_ = ''
for child in child_.childNodes:
if child.nodeType == Node.TEXT_NODE:
self.valueOf_ += child.nodeValue
# end class Parameter
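# --- Hedged example (editor's addition, not part of the generated file) ---
# A minimal sketch of driving the generated DOM classes above directly: parse
# a small snippet with minidom and build a Property object. The element and
# attribute names follow the build* methods above; the attribute values are
# made up for illustration.
def _example_build_property():
    from xml.dom import minidom
    doc = minidom.parseString(
        '<Property Type="String" Name="Label">'
        '<Documentation><UserDocu>User visible label</UserDocu></Documentation>'
        '</Property>')
    prop = Property.factory()
    prop.build(doc.documentElement)
    # Returns ('Label', 'String', 'User visible label')
    return prop.getName(), prop.getType(), prop.getDocumentation().getUserdocu()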
from xml.sax import handler, make_parser
class SaxStackElement:
def __init__(self, name='', obj=None):
self.name = name
self.obj = obj
self.content = ''
#
# SAX handler
#
class SaxGeneratemodelHandler(handler.ContentHandler):
def __init__(self):
self.stack = []
self.root = None
def getRoot(self):
return self.root
def setDocumentLocator(self, locator):
self.locator = locator
def showError(self, msg):
print '*** (showError):', msg
sys.exit(-1)
def startElement(self, name, attrs):
done = 0
if name == 'GenerateModel':
obj = GenerateModel.factory()
stackObj = SaxStackElement('GenerateModel', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Module':
obj = Module.factory()
stackObj = SaxStackElement('Module', obj)
self.stack.append(stackObj)
done = 1
elif name == 'PythonExport':
obj = PythonExport.factory()
val = attrs.get('FatherNamespace', None)
if val is not None:
obj.setFathernamespace(val)
val = attrs.get('RichCompare', None)
if val is not None:
if val in ('true', '1'):
obj.setRichcompare(1)
elif val in ('false', '0'):
obj.setRichcompare(0)
else:
self.reportError('"RichCompare" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
val = attrs.get('Reference', None)
if val is not None:
if val in ('true', '1'):
obj.setReference(1)
elif val in ('false', '0'):
obj.setReference(0)
else:
self.reportError('"Reference" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('FatherInclude', None)
if val is not None:
obj.setFatherinclude(val)
val = attrs.get('Father', None)
if val is not None:
obj.setFather(val)
val = attrs.get('Namespace', None)
if val is not None:
obj.setNamespace(val)
val = attrs.get('Twin', None)
if val is not None:
obj.setTwin(val)
val = attrs.get('Constructor', None)
if val is not None:
if val in ('true', '1'):
obj.setConstructor(1)
elif val in ('false', '0'):
obj.setConstructor(0)
else:
self.reportError('"Constructor" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('Initialization', None)
if val is not None:
if val in ('true', '1'):
obj.setInitialization(1)
elif val in ('false', '0'):
obj.setInitialization(0)
else:
self.reportError('"Initialization" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('TwinPointer', None)
if val is not None:
obj.setTwinpointer(val)
val = attrs.get('Include', None)
if val is not None:
obj.setInclude(val)
val = attrs.get('NumberProtocol', None)
if val is not None:
if val in ('true', '1'):
obj.setNumberprotocol(1)
elif val in ('false', '0'):
obj.setNumberprotocol(0)
else:
self.reportError('"NumberProtocol" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('Delete', None)
if val is not None:
if val in ('true', '1'):
obj.setDelete(1)
elif val in ('false', '0'):
obj.setDelete(0)
else:
self.reportError('"Delete" attribute must be boolean ("true", "1", "false", "0")')
stackObj = SaxStackElement('PythonExport', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Documentation':
obj = Documentation.factory()
stackObj = SaxStackElement('Documentation', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Methode':
obj = Methode.factory()
val = attrs.get('Const', None)
if val is not None:
if val in ('true', '1'):
obj.setConst(1)
elif val in ('false', '0'):
obj.setConst(0)
else:
self.reportError('"Const" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
val = attrs.get('Keyword', None)
if val is not None:
if val in ('true', '1'):
obj.setKeyword(1)
elif val in ('false', '0'):
obj.setKeyword(0)
else:
self.reportError('"Keyword" attribute must be boolean ("true", "1", "false", "0")')
stackObj = SaxStackElement('Methode', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Parameter':
obj = Parameter.factory()
val = attrs.get('Type', None)
if val is not None:
obj.setType(val)
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
stackObj = SaxStackElement('Parameter', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Attribute':
obj = Attribute.factory()
val = attrs.get('ReadOnly', None)
if val is not None:
if val in ('true', '1'):
obj.setReadonly(1)
elif val in ('false', '0'):
obj.setReadonly(0)
else:
self.reportError('"ReadOnly" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
stackObj = SaxStackElement('Attribute', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Sequence':
obj = Sequence.factory()
val = attrs.get('sq_slice', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_slice(1)
elif val in ('false', '0'):
obj.setSq_slice(0)
else:
self.reportError('"sq_slice" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_item', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_item(1)
elif val in ('false', '0'):
obj.setSq_item(0)
else:
self.reportError('"sq_item" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_concat', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_concat(1)
elif val in ('false', '0'):
obj.setSq_concat(0)
else:
self.reportError('"sq_concat" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_inplace_repeat', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_inplace_repeat(1)
elif val in ('false', '0'):
obj.setSq_inplace_repeat(0)
else:
self.reportError('"sq_inplace_repeat" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_ass_slice', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_ass_slice(1)
elif val in ('false', '0'):
obj.setSq_ass_slice(0)
else:
self.reportError('"sq_ass_slice" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_contains', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_contains(1)
elif val in ('false', '0'):
obj.setSq_contains(0)
else:
self.reportError('"sq_contains" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_ass_item', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_ass_item(1)
elif val in ('false', '0'):
obj.setSq_ass_item(0)
else:
self.reportError('"sq_ass_item" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_repeat', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_repeat(1)
elif val in ('false', '0'):
obj.setSq_repeat(0)
else:
self.reportError('"sq_repeat" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_length', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_length(1)
elif val in ('false', '0'):
obj.setSq_length(0)
else:
self.reportError('"sq_length" attribute must be boolean ("true", "1", "false", "0")')
val = attrs.get('sq_inplace_concat', None)
if val is not None:
if val in ('true', '1'):
obj.setSq_inplace_concat(1)
elif val in ('false', '0'):
obj.setSq_inplace_concat(0)
else:
self.reportError('"sq_inplace_concat" attribute must be boolean ("true", "1", "false", "0")')
stackObj = SaxStackElement('Sequence', obj)
self.stack.append(stackObj)
done = 1
elif name == 'CustomAttributes':
stackObj = SaxStackElement('CustomAttributes', None)
self.stack.append(stackObj)
done = 1
elif name == 'ClassDeclarations':
stackObj = SaxStackElement('ClassDeclarations', None)
self.stack.append(stackObj)
done = 1
elif name == 'Dependencies':
obj = Dependencies.factory()
stackObj = SaxStackElement('Dependencies', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Content':
obj = Content.factory()
stackObj = SaxStackElement('Content', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Property':
obj = Property.factory()
stackObj = SaxStackElement('Property', obj)
self.stack.append(stackObj)
done = 1
elif name == 'Feature':
obj = Feature.factory()
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
stackObj = SaxStackElement('Feature', obj)
self.stack.append(stackObj)
done = 1
elif name == 'ViewProvider':
obj = ViewProvider.factory()
stackObj = SaxStackElement('ViewProvider', obj)
self.stack.append(stackObj)
done = 1
elif name == 'DocObject':
obj = DocObject.factory()
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
stackObj = SaxStackElement('DocObject', obj)
self.stack.append(stackObj)
done = 1
elif name == 'GuiCommand':
stackObj = SaxStackElement('GuiCommand', None)
self.stack.append(stackObj)
done = 1
elif name == 'PreferencesPage':
stackObj = SaxStackElement('PreferencesPage', None)
self.stack.append(stackObj)
done = 1
elif name == 'Author':
obj = Author.factory()
val = attrs.get('Name', None)
if val is not None:
obj.setName(val)
val = attrs.get('Licence', None)
if val is not None:
obj.setLicence(val)
val = attrs.get('EMail', None)
if val is not None:
obj.setEmail(val)
stackObj = SaxStackElement('Author', obj)
self.stack.append(stackObj)
done = 1
elif name == 'DeveloperDocu':
stackObj = SaxStackElement('DeveloperDocu', None)
self.stack.append(stackObj)
done = 1
elif name == 'UserDocu':
stackObj = SaxStackElement('UserDocu', None)
self.stack.append(stackObj)
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
def endElement(self, name):
done = 0
if name == 'GenerateModel':
if len(self.stack) == 1:
self.root = self.stack[-1].obj
self.stack.pop()
done = 1
elif name == 'Module':
if len(self.stack) >= 2:
self.stack[-2].obj.addModule(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'PythonExport':
if len(self.stack) >= 2:
self.stack[-2].obj.addPythonexport(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Documentation':
if len(self.stack) >= 2:
self.stack[-2].obj.setDocumentation(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Methode':
if len(self.stack) >= 2:
self.stack[-2].obj.addMethode(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Parameter':
if len(self.stack) >= 2:
self.stack[-2].obj.addParameter(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Attribute':
if len(self.stack) >= 2:
self.stack[-2].obj.addAttribute(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Sequence':
if len(self.stack) >= 2:
self.stack[-2].obj.setSequence(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'CustomAttributes':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setCustomattributes(content)
self.stack.pop()
done = 1
elif name == 'ClassDeclarations':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setClassdeclarations(content)
self.stack.pop()
done = 1
elif name == 'Dependencies':
if len(self.stack) >= 2:
self.stack[-2].obj.setDependencies(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Content':
if len(self.stack) >= 2:
self.stack[-2].obj.setContent(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Property':
if len(self.stack) >= 2:
self.stack[-2].obj.addProperty(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'Feature':
if len(self.stack) >= 2:
self.stack[-2].obj.addFeature(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'ViewProvider':
if len(self.stack) >= 2:
self.stack[-2].obj.setViewprovider(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'DocObject':
if len(self.stack) >= 2:
self.stack[-2].obj.addDocobject(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'GuiCommand':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.addGuicommand(content)
self.stack.pop()
done = 1
elif name == 'PreferencesPage':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.addPreferencespage(content)
self.stack.pop()
done = 1
elif name == 'Author':
if len(self.stack) >= 2:
self.stack[-2].obj.setAuthor(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'DeveloperDocu':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setDeveloperdocu(content)
self.stack.pop()
done = 1
elif name == 'UserDocu':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setUserdocu(content)
self.stack.pop()
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
def characters(self, chrs, start, end):
if len(self.stack) > 0:
self.stack[-1].content += chrs[start:end]
def reportError(self, mesg):
locator = self.locator
sys.stderr.write('Doc: %s Line: %d Column: %d\n' % \
(locator.getSystemId(), locator.getLineNumber(),
locator.getColumnNumber() + 1))
sys.stderr.write(mesg)
sys.stderr.write('\n')
sys.exit(-1)
#raise RuntimeError
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""
def usage():
print USAGE_TEXT
sys.exit(-1)
#
# SAX handler used to determine the top level element.
#
class SaxSelectorHandler(handler.ContentHandler):
def __init__(self):
self.topElementName = None
def getTopElementName(self):
return self.topElementName
def startElement(self, name, attrs):
self.topElementName = name
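        # Raising StopIteration aborts the parse after the first element;
        # parseSelect() below catches it and only needs this top level name.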
raise StopIteration
def parseSelect(inFileName):
infile = file(inFileName, 'r')
topElementName = None
parser = make_parser()
documentHandler = SaxSelectorHandler()
parser.setContentHandler(documentHandler)
try:
try:
parser.parse(infile)
except StopIteration:
topElementName = documentHandler.getTopElementName()
if topElementName is None:
raise RuntimeError, 'no top level element'
topElementName = topElementName.replace('-', '_').replace(':', '_')
if topElementName not in globals():
raise RuntimeError, 'no class for top element: %s' % topElementName
topElement = globals()[topElementName]
infile.seek(0)
doc = minidom.parse(infile)
finally:
infile.close()
rootNode = doc.childNodes[0]
rootObj = topElement.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0)
return rootObj
def saxParse(inFileName):
parser = make_parser()
documentHandler = SaxGeneratemodelHandler()
    parser.setContentHandler(documentHandler)
parser.parse('file:%s' % inFileName)
root = documentHandler.getRoot()
sys.stdout.write('<?xml version="1.0" ?>\n')
root.export(sys.stdout, 0)
return root
def saxParseString(inString):
parser = make_parser()
documentHandler = SaxGeneratemodelHandler()
    parser.setContentHandler(documentHandler)
parser.feed(inString)
parser.close()
rootObj = documentHandler.getRoot()
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
return rootObj
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = GenerateModel.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="GenerateModel")
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.documentElement
rootObj = GenerateModel.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="GenerateModel")
return rootObj
def parseLiteral(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.documentElement
rootObj = GenerateModel.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('from generateModel_Module import *\n\n')
sys.stdout.write('rootObj = GenerateModel(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="GenerateModel")
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 2 and args[0] == '-s':
saxParse(args[1])
elif len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
| wood-galaxy/FreeCAD | src/Tools/generateBase/generateModel_Module.py | Python | lgpl-2.1 | 101,247 |
# Copyright (C)2016 D. Plaindoux.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option) any
# later version.
class WebException(Exception):
def __init__(self, status, message=None):
Exception.__init__(self)
self.status = status
self.message = message
@staticmethod
def get(status):
switcher = {
400: WebException.badRequest,
401: WebException.unauthorized,
402: WebException.paymentRequired,
404: WebException.notFound,
406: WebException.notAcceptable
}
return switcher.get(status, lambda m: WebException(status, m))
@staticmethod
def badRequest(message=None):
return WebException(400,
"Bad request" if message is None else message)
@staticmethod
def unauthorized(message=None):
return WebException(401,
"Unauthorized" if message is None else message)
@staticmethod
def paymentRequired(message=None):
return WebException(402,
"Payment Required" if message is None else message)
@staticmethod
def notFound(message=None):
return WebException(404,
"Not found" if message is None else message)
@staticmethod
def notAcceptable(message=None):
return WebException(406,
"Not acceptable" if message is None else message)
| d-plaindoux/fluent-rest | fluent_rest/runtime/response.py | Python | lgpl-2.1 | 1,628 |
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ola_rdm_get.py
# Copyright (C) 2010 Simon Newton
'''Automated testing for RDM responders.'''
__author__ = '[email protected] (Simon Newton)'
from ola.testing.rdm import TestDefinitions, TestRunner
from ola.testing.rdm.DMXSender import DMXSender
from ola.testing.rdm.TestState import TestState
import logging
import re
import sys
import textwrap
import time
from ola import PidStore
from ola.ClientWrapper import ClientWrapper
from ola.UID import UID
from optparse import OptionParser, OptionGroup, OptionValueError
def ParseOptions():
usage = 'Usage: %prog [options] <uid>'
description = textwrap.dedent("""\
    Run a series of tests on an RDM responder to check its behaviour.
    This requires the OLA server to be running, and the RDM device to have been
    detected. You can confirm this by running ola_rdm_discover -u
    UNIVERSE. This will send SET commands to the broadcast UIDs, which means
    the start address, device label etc. will be changed for all devices
    connected to the responder. Think twice about running this on your
    production lighting rig.
""")
parser = OptionParser(usage, description=description)
  parser.add_option('-c', '--slot-count', default=10, type='int',
                    help='Number of slots to send when sending DMX.')
parser.add_option('-d', '--debug', action='store_true',
help='Print debug information to assist in diagnosing '
'failures.')
parser.add_option('-f', '--dmx-frame-rate', default=0,
type='int',
help='Send DMX frames at this rate in the background.')
parser.add_option('-l', '--log', metavar='FILE',
help='Also log to the file named FILE.uid.timestamp.')
parser.add_option('--list-tests', action='store_true',
help='Display a list of all tests')
parser.add_option('-p', '--pid-store', metavar='FILE',
help='The location of the PID definitions.')
parser.add_option('-s', '--skip-check', action='store_true',
help='Skip the check for multiple devices.')
parser.add_option('-t', '--tests', metavar='TEST1,TEST2',
help='A comma separated list of tests to run.')
parser.add_option('--timestamp', action='store_true',
help='Add timestamps to each test.')
parser.add_option('--no-factory-defaults', action='store_true',
help="Don't run the SET factory defaults tests")
parser.add_option('-w', '--broadcast-write-delay', default=0,
type='int',
                    help='The time in ms to wait after sending broadcast set '
                         'commands.')
parser.add_option('-u', '--universe', default=0,
type='int',
help='The universe number to use, default is universe 0.')
options, args = parser.parse_args()
if options.list_tests:
return options
if not args:
parser.print_help()
sys.exit(2)
uid = UID.FromString(args[0])
if uid is None:
parser.print_usage()
print 'Invalid UID: %s' % args[0]
sys.exit(2)
options.uid = uid
return options
class MyFilter(object):
"""Filter out the ascii coloring."""
def filter(self, record):
msg = record.msg
    record.msg = re.sub(r'\x1b\[\d*m', '', str(msg))
return True
def SetupLogging(options):
"""Setup the logging for test results."""
level = logging.INFO
if options.debug:
level = logging.DEBUG
logging.basicConfig(
level=level,
format='%(message)s')
if options.log:
file_name = '%s.%s.%d' % (options.log, options.uid, time.time())
    file_handler = logging.FileHandler(file_name, 'w')
file_handler.addFilter(MyFilter())
if options.debug:
file_handler.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(file_handler)
def DisplaySummary(tests):
"""Print a summary of the tests."""
by_category = {}
warnings = []
advisories = []
count_by_state = {}
for test in tests:
state = test.state
count_by_state[state] = count_by_state.get(state, 0) + 1
warnings.extend(test.warnings)
advisories.extend(test.advisories)
by_category.setdefault(test.category, {})
by_category[test.category][state] = (1 +
by_category[test.category].get(state, 0))
total = sum(count_by_state.values())
logging.info('------------------- Warnings --------------------')
for warning in sorted(warnings):
logging.info(warning)
logging.info('------------------ Advisories -------------------')
for advisory in sorted(advisories):
logging.info(advisory)
logging.info('------------------ By Category ------------------')
for category, counts in by_category.iteritems():
passed = counts.get(TestState.PASSED, 0)
total_run = (passed + counts.get(TestState.FAILED, 0))
if total_run == 0:
continue
percent = 1.0 * passed / total_run
logging.info(' %26s: %3d / %3d %.0f%%' %
(category, passed, total_run, percent * 100))
logging.info('-------------------------------------------------')
logging.info('%d / %d tests run, %d passed, %d failed, %d broken' % (
total - count_by_state.get(TestState.NOT_RUN, 0),
total,
count_by_state.get(TestState.PASSED, 0),
count_by_state.get(TestState.FAILED, 0),
count_by_state.get(TestState.BROKEN, 0)))
def main():
options = ParseOptions()
test_classes = TestRunner.GetTestClasses(TestDefinitions)
if options.list_tests:
for test_name in sorted(c.__name__ for c in test_classes):
print test_name
sys.exit(0)
SetupLogging(options)
pid_store = PidStore.GetStore(options.pid_store, ('pids.proto',))
wrapper = ClientWrapper()
global uid_ok
uid_ok = False
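  # uid_ok is kept module-global so the UIDList callback below can report its
  # result back to main() once wrapper.Run() has dispatched it.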
def UIDList(state, uids):
wrapper.Stop()
global uid_ok
if not state.Succeeded():
logging.error('Fetch failed: %s' % state.message)
return
for uid in uids:
if uid == options.uid:
logging.debug('Found UID %s' % options.uid)
uid_ok = True
if not uid_ok:
logging.error('UID %s not found in universe %d' %
(options.uid, options.universe))
return
if len(uids) > 1:
logging.info(
'The following devices were detected and will be reconfigured')
for uid in uids:
logging.info(' %s' % uid)
if not options.skip_check:
logging.info('Continue ? [Y/n]')
response = raw_input().strip().lower()
uid_ok = response == 'y' or response == ''
logging.debug('Fetching UID list from server')
wrapper.Client().FetchUIDList(options.universe, UIDList)
wrapper.Run()
wrapper.Reset()
if not uid_ok:
sys.exit()
test_filter = None
if options.tests is not None:
logging.info('Restricting tests to %s' % options.tests)
test_filter = set(options.tests.split(','))
logging.info(
'Starting tests, universe %d, UID %s, broadcast write delay %dms' %
(options.universe, options.uid, options.broadcast_write_delay))
runner = TestRunner.TestRunner(options.universe,
options.uid,
options.broadcast_write_delay,
pid_store,
wrapper,
options.timestamp)
for test_class in test_classes:
runner.RegisterTest(test_class)
dmx_sender = DMXSender(wrapper,
options.universe,
options.dmx_frame_rate,
options.slot_count)
tests, device = runner.RunTests(test_filter, options.no_factory_defaults)
DisplaySummary(tests)
if __name__ == '__main__':
main()
| mlba-team/open-lighting | tools/rdm/rdm_responder_test.py | Python | lgpl-2.1 | 8,444 |
#! /usr/bin/python
# Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys, os
class settings:
module_name='TnFOX'
boost_path = ''
boost_libs_path = ''
gccxml_path = ''
pygccxml_path = ''
pyplusplus_path = ''
tnfox_path = ''
tnfox_include_path = ''
tnfox_python_include_path = ''
tnfox_libs_path = ''
python_libs_path = ''
python_include_path = ''
working_dir = ''
generated_files_dir = ''
unittests_dir = ''
xml_files = ''
defined_symbols = [ "FXDISABLE_GLOBALALLOCATORREPLACEMENTS"
, "FX_INCLUDE_ABSOLUTELY_EVERYTHING"
, "FOXPYTHONDLL_EXPORTS"
, "FX_NO_GLOBAL_NAMESPACE" ]
# For debugging purposes to get far smaller bindings, you can define FX_DISABLEGUI
#defined_symbols.append("FX_DISABLEGUI=1")
if 'big'==sys.byteorder:
defined_symbols.append("FOX_BIGENDIAN=1")
else:
defined_symbols.append("FOX_BIGENDIAN=0")
if 'win32'==sys.platform or 'win64'==sys.platform:
defined_symbols.append("WIN32")
defined_symbols_gccxml = defined_symbols + ["__GCCXML__"
, "_NATIVE_WCHAR_T_DEFINED=1" ]
def setup_environment():
sys.path.append( settings.pygccxml_path )
sys.path.append( settings.pyplusplus_path )
setup_environment = staticmethod(setup_environment)
rootdir = os.path.normpath(os.getcwd() + '/../..')
settings.boost_path = rootdir+'/boost'
settings.boost_libs_path = rootdir+'/boost/libs'
#settings.gccxml_path = ''
#settings.pygccxml_path = ''
#settings.pyplusplus_path = ''
settings.python_include_path = os.getenv('PYTHON_INCLUDE')
settings.python_libs_path = os.getenv('PYTHON_ROOT')+'/libs'
settings.tnfox_path = rootdir+'/TnFOX'
settings.tnfox_include_path = rootdir+'/TnFOX/include'
settings.tnfox_libs_path = rootdir+'/TnFOX/lib'
settings.working_dir = os.getcwd()
settings.generated_files_dir = rootdir+'/TnFOX/Python/generated'
settings.unittests_dir = rootdir+'/TnFOX/Python/unittests'
settings.setup_environment()
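# Note (editor's addition): setup_environment() above only appends the
# configured pygccxml/pyplusplus checkouts to sys.path, so it must run before
# those packages are imported anywhere else.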
| ned14/tnfox | Python/environment.py | Python | lgpl-2.1 | 2,231 |
############################################################################
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
# Contact: http://www.qt-project.org/legal
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and Digia. For licensing terms and
# conditions see http://qt.digia.com/licensing. For further information
# use the contact form at http://qt.digia.com/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 as published by the Free Software
# Foundation and appearing in the file LICENSE.LGPL included in the
# packaging of this file. Please review the following information to
# ensure the GNU Lesser General Public License version 2.1 requirements
# will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, Digia gives you certain additional
# rights. These rights are described in the Digia Qt LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
############################################################################
import os
import sys
import base64
if sys.version_info[0] >= 3:
xrange = range
toInteger = int
else:
toInteger = long
verbosity = 1
def hasPlot():
fileName = "/usr/bin/gnuplot"
return os.path.isfile(fileName) and os.access(fileName, os.X_OK)
try:
import subprocess
def arrayForms():
if hasPlot():
return "Normal,Plot"
return "Normal"
except:
def arrayForms():
return "Normal"
def bytesToString(b):
if sys.version_info[0] == 2:
return b
return b.decode("utf8")
def stringToBytes(s):
if sys.version_info[0] == 2:
return s
return s.encode("utf8")
# Base 16 decoding operating on string->string
def b16decode(s):
return bytesToString(base64.b16decode(stringToBytes(s), True))
# Base 16 decoding operating on string->string
def b16encode(s):
return bytesToString(base64.b16encode(stringToBytes(s)))
# Base 64 decoding operating on string->string
def b64decode(s):
return bytesToString(base64.b64decode(stringToBytes(s)))
# Base 64 decoding operating on string->string
def b64encode(s):
return bytesToString(base64.b64encode(stringToBytes(s)))
#
# Gnuplot based display for array-like structures.
#
gnuplotPipe = {}
gnuplotPid = {}
def warn(message):
print("XXX: %s\n" % message.encode("latin1"))
def showException(msg, exType, exValue, exTraceback):
warn("**** CAUGHT EXCEPTION: %s ****" % msg)
try:
import traceback
for line in traceback.format_exception(exType, exValue, exTraceback):
warn("%s" % line)
except:
pass
def stripClassTag(typeName):
if typeName.startswith("class "):
return typeName[6:]
if typeName.startswith("struct "):
return typeName[7:]
if typeName.startswith("const "):
return typeName[6:]
if typeName.startswith("volatile "):
return typeName[9:]
return typeName
class Children:
def __init__(self, d, numChild = 1, childType = None, childNumChild = None,
maxNumChild = None, addrBase = None, addrStep = None):
self.d = d
self.numChild = numChild
self.childNumChild = childNumChild
self.maxNumChild = maxNumChild
self.addrBase = addrBase
self.addrStep = addrStep
self.printsAddress = True
if childType is None:
self.childType = None
else:
self.childType = stripClassTag(str(childType))
self.d.put('childtype="%s",' % self.childType)
if childNumChild is None:
pass
#if self.d.isSimpleType(childType):
# self.d.put('childnumchild="0",')
# self.childNumChild = 0
#elif childType.code == PointerCode:
# self.d.put('childnumchild="1",')
# self.childNumChild = 1
else:
self.d.put('childnumchild="%s",' % childNumChild)
self.childNumChild = childNumChild
try:
if not addrBase is None and not addrStep is None:
self.d.put('addrbase="0x%x",' % toInteger(addrBase))
self.d.put('addrstep="0x%x",' % toInteger(addrStep))
self.printsAddress = False
except:
warn("ADDRBASE: %s" % addrBase)
warn("ADDRSTEP: %s" % addrStep)
#warn("CHILDREN: %s %s %s" % (numChild, childType, childNumChild))
def __enter__(self):
self.savedChildType = self.d.currentChildType
self.savedChildNumChild = self.d.currentChildNumChild
self.savedNumChild = self.d.currentNumChild
self.savedMaxNumChild = self.d.currentMaxNumChild
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentChildType = self.childType
self.d.currentChildNumChild = self.childNumChild
self.d.currentNumChild = self.numChild
self.d.currentMaxNumChild = self.maxNumChild
self.d.currentPrintsAddress = self.printsAddress
self.d.put("children=[")
def __exit__(self, exType, exValue, exTraceBack):
if not exType is None:
if self.d.passExceptions:
showException("CHILDREN", exType, exValue, exTraceBack)
self.d.putNumChild(0)
self.d.putValue("<not accessible>")
if not self.d.currentMaxNumChild is None:
if self.d.currentMaxNumChild < self.d.currentNumChild:
self.d.put('{name="<incomplete>",value="",type="",numchild="0"},')
self.d.currentChildType = self.savedChildType
self.d.currentChildNumChild = self.savedChildNumChild
self.d.currentNumChild = self.savedNumChild
self.d.currentMaxNumChild = self.savedMaxNumChild
self.d.currentPrintsAddress = self.savedPrintsAddress
self.d.put('],')
return True
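# --- Hedged sketch (editor's addition, not part of the original file) ---
# How the Children context manager is typically combined with SubItem when
# writing a custom dumper; "d" is a live Dumper instance supplied by the
# debugger bridge, and putSubItem()/isExpanded() plus the qdump__* naming
# convention are assumed from the wider dumper API:
#
#   def qdump__MyPair(d, value):
#       d.putValue("<pair>")
#       d.putNumChild(2)
#       if d.isExpanded():
#           with Children(d, numChild=2):
#               d.putSubItem("first", value["first"])
#               d.putSubItem("second", value["second"])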
class SubItem:
def __init__(self, d, component):
self.d = d
self.name = component
self.iname = None
def __enter__(self):
self.d.enterSubItem(self)
def __exit__(self, exType, exValue, exTraceBack):
return self.d.exitSubItem(self, exType, exValue, exTraceBack)
class NoAddress:
def __init__(self, d):
self.d = d
def __enter__(self):
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentPrintsAddress = False
def __exit__(self, exType, exValue, exTraceBack):
self.d.currentPrintsAddress = self.savedPrintsAddress
class TopLevelItem(SubItem):
def __init__(self, d, iname):
self.d = d
self.iname = iname
self.name = None
class UnnamedSubItem(SubItem):
def __init__(self, d, component):
self.d = d
self.iname = "%s.%s" % (self.d.currentIName, component)
self.name = None
class DumperBase:
def __init__(self):
self.isCdb = False
self.isGdb = False
self.isLldb = False
# Later set, or not set:
# cachedQtVersion
self.stringCutOff = 10000
# This is a cache mapping from 'type name' to 'display alternatives'.
self.qqFormats = {}
# This is a cache of all known dumpers.
self.qqDumpers = {}
# This is a cache of all dumpers that support writing.
self.qqEditable = {}
# This keeps canonical forms of the typenames, without array indices etc.
self.cachedFormats = {}
def stripForFormat(self, typeName):
if typeName in self.cachedFormats:
return self.cachedFormats[typeName]
stripped = ""
inArray = 0
for c in stripClassTag(typeName):
if c == '<':
break
if c == ' ':
continue
if c == '[':
inArray += 1
elif c == ']':
inArray -= 1
if inArray and ord(c) >= 48 and ord(c) <= 57:
continue
stripped += c
self.cachedFormats[typeName] = stripped
return stripped
def is32bit(self):
return self.ptrSize() == 4
def computeLimit(self, size, limit):
if limit is None:
return size
if limit == 0:
return min(size, self.stringCutOff)
return min(size, limit)
def byteArrayDataHelper(self, addr):
if self.qtVersion() >= 0x050000:
# QTypedArray:
# - QtPrivate::RefCount ref
# - int size
# - uint alloc : 31, capacityReserved : 1
# - qptrdiff offset
size = self.extractInt(addr + 4)
alloc = self.extractInt(addr + 8) & 0x7ffffff
data = addr + self.dereference(addr + 8 + self.ptrSize())
if self.ptrSize() == 4:
data = data & 0xffffffff
else:
data = data & 0xffffffffffffffff
else:
# Data:
# - QBasicAtomicInt ref;
# - int alloc, size;
# - [padding]
# - char *data;
alloc = self.extractInt(addr + 4)
size = self.extractInt(addr + 8)
data = self.dereference(addr + 8 + self.ptrSize())
return data, size, alloc
# addr is the begin of a QByteArrayData structure
def encodeStringHelper(self, addr, limit = 0):
# Should not happen, but we get it with LLDB as result
# of inferior calls
if addr == 0:
return ""
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
limit = self.computeLimit(size, limit)
s = self.readMemory(data, 2 * limit)
if limit < size:
s += "2e002e002e00"
return s
def encodeByteArrayHelper(self, addr, limit = None):
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
limit = self.computeLimit(size, limit)
s = self.readMemory(data, limit)
if limit < size:
s += "2e2e2e"
return s
def encodeByteArray(self, value):
return self.encodeByteArrayHelper(self.dereferenceValue(value))
def byteArrayData(self, value):
return self.byteArrayDataHelper(self.dereferenceValue(value))
def putByteArrayValue(self, value):
return self.putValue(self.encodeByteArray(value), Hex2EncodedLatin1)
def putStringValueByAddress(self, addr):
self.putValue(self.encodeStringHelper(self.dereference(addr)),
Hex4EncodedLittleEndian)
def encodeString(self, value):
return self.encodeStringHelper(self.dereferenceValue(value))
def stringData(self, value):
return self.byteArrayDataHelper(self.dereferenceValue(value))
def extractTemplateArgument(self, typename, position):
level = 0
skipSpace = False
inner = ''
for c in typename[typename.find('<') + 1 : -1]:
if c == '<':
inner += c
level += 1
elif c == '>':
level -= 1
inner += c
elif c == ',':
if level == 0:
if position == 0:
return inner.strip()
position -= 1
inner = ''
else:
inner += c
skipSpace = True
else:
if skipSpace and c == ' ':
pass
else:
inner += c
skipSpace = False
return inner.strip()
def putStringValue(self, value):
return self.putValue(self.encodeString(value), Hex4EncodedLittleEndian)
def putAddressItem(self, name, value, type = ""):
with SubItem(self, name):
self.putValue("0x%x" % value)
self.putType(type)
self.putNumChild(0)
def putIntItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("int")
self.putNumChild(0)
def putBoolItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("bool")
self.putNumChild(0)
def putGenericItem(self, name, type, value, encoding = None):
with SubItem(self, name):
self.putValue(value, encoding)
self.putType(type)
self.putNumChild(0)
def putMapName(self, value):
ns = self.qtNamespace()
if str(value.type) == ns + "QString":
self.put('key="%s",' % self.encodeString(value))
self.put('keyencoded="%s",' % Hex4EncodedLittleEndian)
elif str(value.type) == ns + "QByteArray":
self.put('key="%s",' % self.encodeByteArray(value))
self.put('keyencoded="%s",' % Hex2EncodedLatin1)
else:
if self.isLldb:
self.put('name="%s",' % value.GetValue())
else:
self.put('name="%s",' % value)
def isMapCompact(self, keyType, valueType):
format = self.currentItemFormat()
if format == 2:
return True # Compact.
return self.isSimpleType(keyType) and self.isSimpleType(valueType)
def check(self, exp):
if not exp:
raise RuntimeError("Check failed")
def checkRef(self, ref):
try:
count = int(ref["atomic"]["_q_value"]) # Qt 5.
minimum = -1
except:
count = int(ref["_q_value"]) # Qt 4.
minimum = 0
# Assume there aren't a million references to any object.
self.check(count >= minimum)
self.check(count < 1000000)
def findFirstZero(self, p, maximum):
for i in xrange(maximum):
if int(p.dereference()) == 0:
return i
p = p + 1
return maximum + 1
def encodeCArray(self, p, innerType, suffix):
t = self.lookupType(innerType)
p = p.cast(t.pointer())
limit = self.findFirstZero(p, self.stringCutOff)
s = self.readMemory(p, limit * t.sizeof)
if limit > self.stringCutOff:
s += suffix
return s
def encodeCharArray(self, p):
return self.encodeCArray(p, "unsigned char", "2e2e2e")
def encodeChar2Array(self, p):
return self.encodeCArray(p, "unsigned short", "2e002e002e00")
def encodeChar4Array(self, p):
return self.encodeCArray(p, "unsigned int", "2e0000002e0000002e000000")
def putItemCount(self, count, maximum = 1000000000):
# This needs to override the default value, so don't use 'put' directly.
if count > maximum:
self.putValue('<>%s items>' % maximum)
else:
self.putValue('<%s items>' % count)
def putNoType(self):
# FIXME: replace with something that does not need special handling
# in SubItem.__exit__().
self.putBetterType(" ")
def putInaccessible(self):
#self.putBetterType(" ")
self.putNumChild(0)
self.currentValue = None
def putQObjectNameValue(self, value):
try:
intSize = self.intSize()
ptrSize = self.ptrSize()
# dd = value["d_ptr"]["d"] is just behind the vtable.
dd = self.dereference(self.addressOf(value) + ptrSize)
if self.qtVersion() < 0x050000:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc..
# - int postedEvents;
# - QMetaObject *metaObject;
# Offset of objectName in QObjectPrivate: 5 pointer + 2 int
# - [QObjectData base]
# - QString objectName
objectName = self.dereference(dd + 5 * ptrSize + 2 * intSize)
else:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc...
# - int postedEvents;
# - QDynamicMetaObjectData *metaObject;
extra = self.dereference(dd + 5 * ptrSize + 2 * intSize)
if extra == 0:
return False
# Offset of objectName in ExtraData: 6 pointer
# - QVector<QObjectUserData *> userData; only #ifndef QT_NO_USERDATA
# - QList<QByteArray> propertyNames;
# - QList<QVariant> propertyValues;
# - QVector<int> runningTimers;
# - QList<QPointer<QObject> > eventFilters;
# - QString objectName
objectName = self.dereference(extra + 5 * ptrSize)
data, size, alloc = self.byteArrayDataHelper(objectName)
if size == 0:
return False
str = self.readMemory(data, 2 * size)
self.putValue(str, Hex4EncodedLittleEndian, 1)
return True
except:
pass
def isKnownMovableType(self, type):
if type in (
"QBrush", "QBitArray", "QByteArray", "QCustomTypeInfo", "QChar", "QDate",
"QDateTime", "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize",
"QHashDummyValue", "QIcon", "QImage", "QLine", "QLineF", "QLatin1Char",
"QLocale", "QMatrix", "QModelIndex", "QPoint", "QPointF", "QPen",
"QPersistentModelIndex", "QResourceRoot", "QRect", "QRectF", "QRegExp",
"QSize", "QSizeF", "QString", "QTime", "QTextBlock", "QUrl", "QVariant",
"QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration"
):
return True
return type == "QStringList" and self.qtVersion() >= 0x050000
def currentItemFormat(self, type = None):
format = self.formats.get(self.currentIName)
if format is None:
if type is None:
type = self.currentType
needle = self.stripForFormat(str(type))
format = self.typeformats.get(needle)
return format
def cleanAddress(addr):
if addr is None:
return "<no address>"
# We cannot use str(addr) as it yields rubbish for char pointers
# that might trigger Unicode encoding errors.
#return addr.cast(lookupType("void").pointer())
# We do not use "hex(...)" as it (sometimes?) adds a "L" suffix.
return "0x%x" % toInteger(addr)
# Some "Enums"
# Encodings. Keep that synchronized with DebuggerEncoding in debuggerprotocol.h
Unencoded8Bit, \
Base64Encoded8BitWithQuotes, \
Base64Encoded16BitWithQuotes, \
Base64Encoded32BitWithQuotes, \
Base64Encoded16Bit, \
Base64Encoded8Bit, \
Hex2EncodedLatin1, \
Hex4EncodedLittleEndian, \
Hex8EncodedLittleEndian, \
Hex2EncodedUtf8, \
Hex8EncodedBigEndian, \
Hex4EncodedBigEndian, \
Hex4EncodedLittleEndianWithoutQuotes, \
Hex2EncodedLocal8Bit, \
JulianDate, \
MillisecondsSinceMidnight, \
JulianDateAndMillisecondsSinceMidnight, \
Hex2EncodedInt1, \
Hex2EncodedInt2, \
Hex2EncodedInt4, \
Hex2EncodedInt8, \
Hex2EncodedUInt1, \
Hex2EncodedUInt2, \
Hex2EncodedUInt4, \
Hex2EncodedUInt8, \
Hex2EncodedFloat4, \
Hex2EncodedFloat8, \
IPv6AddressAndHexScopeId, \
Hex2EncodedUtf8WithoutQuotes, \
MillisecondsSinceEpoch \
= range(30)
# Display modes. Keep that synchronized with DebuggerDisplay in watchutils.h
StopDisplay, \
DisplayImageData, \
DisplayUtf16String, \
DisplayImageFile, \
DisplayProcess, \
DisplayLatin1String, \
DisplayUtf8String \
= range(7)
def mapForms():
return "Normal,Compact"
def arrayForms():
if hasPlot():
return "Normal,Plot"
return "Normal"
| richardmg/qtcreator | share/qtcreator/debugger/dumper.py | Python | lgpl-2.1 | 20,529 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: Chinmaya Pancholi <[email protected]>, Shiva Manne <[email protected]>
# Copyright (C) 2017 RaRe Technologies s.r.o.
"""Learn word representations via fasttext's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_.
Notes
-----
There are more ways to get word vectors in Gensim than just FastText.
See wrappers for VarEmbed and WordRank or Word2Vec
This module allows training a word embedding from a training corpus with the additional ability
to obtain word vectors for out-of-vocabulary words.
For a tutorial on gensim's native fasttext, refer to the noteboook -- [2]_
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) fasttext training**
.. [1] P. Bojanowski, E. Grave, A. Joulin, T. Mikolov
Enriching Word Vectors with Subword Information. In arXiv preprint arXiv:1607.04606.
https://arxiv.org/abs/1607.04606
.. [2] https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/FastText_Tutorial.ipynb
"""
import logging
import numpy as np
from numpy import zeros, ones, vstack, sum as np_sum, empty, float32 as REAL
from gensim.models.word2vec import Word2Vec, train_sg_pair, train_cbow_pair
from gensim.models.wrappers.fasttext import FastTextKeyedVectors
from gensim.models.wrappers.fasttext import FastText as Ft_Wrapper, compute_ngrams, ft_hash
logger = logging.getLogger(__name__)
try:
from gensim.models.fasttext_inner import train_batch_sg, train_batch_cbow
from gensim.models.fasttext_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
logger.debug('Fast version of Fasttext is being used')
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
logger.warning('Slow version of Fasttext is being used')
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
"""Update CBOW model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from fasttext_inner instead.
Parameters
----------
model : :class:`~gensim.models.fasttext.FastText`
`FastText` instance.
sentences : iterable of iterables
Iterable of the sentences directly from disk/network.
alpha : float
Learning rate.
work : :class:`numpy.ndarray`
Private working memory for each worker.
neu1 : :class:`numpy.ndarray`
Private working memory for each worker.
Returns
-------
int
Effective number of words trained.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window)
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
word2_subwords = []
vocab_subwords_indices = []
ngrams_subwords_indices = []
for index in word2_indices:
vocab_subwords_indices += [index]
word2_subwords += model.wv.ngrams_word[model.wv.index2word[index]]
for subword in word2_subwords:
ngrams_subwords_indices.append(model.wv.ngrams[subword])
l1_vocab = np_sum(model.wv.syn0_vocab[vocab_subwords_indices], axis=0) # 1 x vector_size
l1_ngrams = np_sum(model.wv.syn0_ngrams[ngrams_subwords_indices], axis=0) # 1 x vector_size
l1 = np_sum([l1_vocab, l1_ngrams], axis=0)
subwords_indices = [vocab_subwords_indices] + [ngrams_subwords_indices]
if (subwords_indices[0] or subwords_indices[1]) and model.cbow_mean:
l1 /= (len(subwords_indices[0]) + len(subwords_indices[1]))
# train on the sliding window for target word
train_cbow_pair(model, word, subwords_indices, l1, alpha, is_ft=True)
result += len(word_vocabs)
return result
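# Illustrative note on the composition above (not part of the original module):
# for a context of N vocab words whose ngrams expand to M subword rows,
#
#   l1 = sum(syn0_vocab[vocab rows]) + sum(syn0_ngrams[ngram rows])
#
# and, when `cbow_mean` is set, l1 is divided by (N + M) -- the mean over all
# contributing rows, not over words.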
def train_batch_sg(model, sentences, alpha, work=None, neu1=None):
"""Update skip-gram model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from :meth:`gensim.models.fasttext.FastText.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from fasttext_inner instead.
Parameters
----------
model : :class:`~gensim.models.fasttext.FastText`
`FastText` instance.
sentences : iterable of iterables
Iterable of the sentences directly from disk/network.
alpha : float
Learning rate.
work : :class:`numpy.ndarray`
Private working memory for each worker.
neu1 : :class:`numpy.ndarray`
Private working memory for each worker.
Returns
-------
int
Effective number of words trained.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
subwords_indices = [word.index]
word2_subwords = model.wv.ngrams_word[model.wv.index2word[word.index]]
for subword in word2_subwords:
subwords_indices.append(model.wv.ngrams[subword])
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
if pos2 != pos: # don't train on the `word` itself
train_sg_pair(model, model.wv.index2word[word2.index], subwords_indices, alpha, is_ft=True)
result += len(word_vocabs)
return result
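# A small sketch of the skip-gram input built above for one center word
# (illustrative; assumes a trained model whose wv maps are populated):
#
#   word = model.wv.vocab['night']
#   rows = [word.index] + [model.wv.ngrams[ng] for ng in model.wv.ngrams_word['night']]
#   # the first entry indexes syn0_vocab, the rest index syn0_ngrams; each
#   # context word is then predicted from this composite representation.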
class FastText(Word2Vec):
"""Class for training, using and evaluating word representations learned using method
described in [1]_ aka Fasttext.
The model can be stored/loaded via its :meth:`~gensim.models.fasttext.FastText.save()` and
:meth:`~gensim.models.fasttext.FastText.load()` methods, or loaded in a format compatible with the original
fasttext implementation via :meth:`~gensim.models.fasttext.FastText.load_fasttext_format()`.
"""
def __init__(
self, sentences=None, sg=0, hs=0, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0, min_n=3, max_n=6, sorted_vocab=1,
bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH):
"""Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
Parameters
----------
sentences : iterable of iterables
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
in some other way.
sg : int {1, 0}
            Defines the training algorithm. If 1, skip-gram is used; otherwise, CBOW is employed.
size : int
Dimensionality of the feature vectors.
window : int
The maximum distance between the current and predicted word within a sentence.
alpha : float
The initial learning rate.
min_alpha : float
Learning rate will linearly drop to `min_alpha` as training progresses.
seed : int
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
min_count : int
Ignores all words with total frequency lower than this.
max_vocab_size : int
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
sample : float
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
workers : int
Use these many worker threads to train the model (=faster training with multicore machines).
hs : int {1,0}
If 1, hierarchical softmax will be used for model training.
If set to 0, and `negative` is non-zero, negative sampling will be used.
negative : int
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
cbow_mean : int {1,0}
            If 0, use the sum of the context word vectors. If 1, use the mean. Only applies when CBOW is used.
hashfxn : function
Hash function to use to randomly initialize weights, for increased training reproducibility.
iter : int
Number of iterations (epochs) over the corpus.
trim_rule : function
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
sorted_vocab : int {1,0}
If 1, sort the vocabulary by descending frequency before assigning word indexes.
batch_words : int
Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
min_n : int
Min length of char ngrams to be used for training word representations.
max_n : int
            Max length of char ngrams to be used for training word representations. Set `max_n` to be
            less than `min_n` to avoid char ngrams being used.
word_ngrams : int {1,0}
            If 1, word vectors are enriched with subword (ngram) information.
If 0, this is equivalent to word2vec.
bucket : int
Character ngrams are hashed into a fixed number of buckets, in order to limit the
memory usage of the model. This option specifies the number of buckets used by the model.
Examples
--------
Initialize and train a `FastText` model
>>> from gensim.models import FastText
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = FastText(sentences, min_count=1)
>>> say_vector = model['say'] # get vector for word
>>> of_vector = model['of'] # get vector for out-of-vocab word
"""
# fastText specific params
self.bucket = bucket
self.word_ngrams = word_ngrams
self.min_n = min_n
self.max_n = max_n
if self.word_ngrams <= 1 and self.max_n == 0:
self.bucket = 0
super(FastText, self).__init__(
sentences=sentences, size=size, alpha=alpha, window=window, min_count=min_count,
max_vocab_size=max_vocab_size, sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
sg=sg, hs=hs, negative=negative, cbow_mean=cbow_mean, hashfxn=hashfxn, iter=iter, null_word=null_word,
trim_rule=trim_rule, sorted_vocab=sorted_vocab, batch_words=batch_words)
def initialize_word_vectors(self):
"""Initializes FastTextKeyedVectors instance to store all vocab/ngram vectors for the model."""
self.wv = FastTextKeyedVectors()
self.wv.min_n = self.min_n
self.wv.max_n = self.max_n
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
"""Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
Parameters
----------
sentences : iterable of iterables
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
keep_raw_vocab : bool
If not true, delete the raw vocabulary after the scaling is done and free up RAM.
trim_rule : function
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
progress_per : int
Indicates how many words to process before showing/updating the progress.
update: bool
If true, the new words in `sentences` will be added to model's vocab.
Example
-------
Train a model and update vocab for online training
>>> from gensim.models import FastText
>>> sentences_1 = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>> sentences_2 = [["dude", "say", "wazzup!"]]
>>>
>>> model = FastText(min_count=1)
>>> model.build_vocab(sentences_1)
>>> model.train(sentences_1, total_examples=model.corpus_count, epochs=model.iter)
>>> model.build_vocab(sentences_2, update=True)
>>> model.train(sentences_2, total_examples=model.corpus_count, epochs=model.iter)
"""
if update:
if not len(self.wv.vocab):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus "
"before doing an online update.")
self.old_vocab_len = len(self.wv.vocab)
self.old_hash2index_len = len(self.wv.hash2index)
super(FastText, self).build_vocab(
sentences, keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, progress_per=progress_per, update=update)
self.init_ngrams(update=update)
def init_ngrams(self, update=False):
"""Compute ngrams of all words present in vocabulary and stores vectors for only those ngrams.
Vectors for other ngrams are initialized with a random uniform distribution in FastText.
Parameters
----------
update : bool
If True, the new vocab words and their new ngrams word vectors are initialized
with random uniform distribution and updated/added to the existing vocab word and ngram vectors.
"""
if not update:
self.wv.ngrams = {}
self.wv.syn0_vocab = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
self.syn0_vocab_lockf = ones((len(self.wv.vocab), self.vector_size), dtype=REAL)
self.wv.syn0_ngrams = empty((self.bucket, self.vector_size), dtype=REAL)
self.syn0_ngrams_lockf = ones((self.bucket, self.vector_size), dtype=REAL)
all_ngrams = []
for w, v in self.wv.vocab.items():
self.wv.ngrams_word[w] = compute_ngrams(w, self.min_n, self.max_n)
all_ngrams += self.wv.ngrams_word[w]
all_ngrams = list(set(all_ngrams))
self.num_ngram_vectors = len(all_ngrams)
logger.info("Total number of ngrams is %d", len(all_ngrams))
self.wv.hash2index = {}
ngram_indices = []
new_hash_count = 0
            for ngram in all_ngrams:
ngram_hash = ft_hash(ngram) % self.bucket
if ngram_hash in self.wv.hash2index:
self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
else:
                    ngram_indices.append(ngram_hash)  # ngram_hash is already reduced modulo self.bucket
self.wv.hash2index[ngram_hash] = new_hash_count
self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
new_hash_count = new_hash_count + 1
self.wv.syn0_ngrams = self.wv.syn0_ngrams.take(ngram_indices, axis=0)
self.syn0_ngrams_lockf = self.syn0_ngrams_lockf.take(ngram_indices, axis=0)
self.reset_ngram_weights()
else:
new_ngrams = []
for w, v in self.wv.vocab.items():
self.wv.ngrams_word[w] = compute_ngrams(w, self.min_n, self.max_n)
new_ngrams += [ng for ng in self.wv.ngrams_word[w] if ng not in self.wv.ngrams]
new_ngrams = list(set(new_ngrams))
logger.info("Number of new ngrams is %d", len(new_ngrams))
new_hash_count = 0
            for ngram in new_ngrams:
ngram_hash = ft_hash(ngram) % self.bucket
if ngram_hash not in self.wv.hash2index:
self.wv.hash2index[ngram_hash] = new_hash_count + self.old_hash2index_len
self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
new_hash_count = new_hash_count + 1
else:
self.wv.ngrams[ngram] = self.wv.hash2index[ngram_hash]
rand_obj = np.random
rand_obj.seed(self.seed)
new_vocab_rows = rand_obj.uniform(
-1.0 / self.vector_size, 1.0 / self.vector_size,
(len(self.wv.vocab) - self.old_vocab_len, self.vector_size)
).astype(REAL)
new_vocab_lockf_rows = ones((len(self.wv.vocab) - self.old_vocab_len, self.vector_size), dtype=REAL)
new_ngram_rows = rand_obj.uniform(
-1.0 / self.vector_size, 1.0 / self.vector_size,
(len(self.wv.hash2index) - self.old_hash2index_len, self.vector_size)
).astype(REAL)
new_ngram_lockf_rows = ones(
(len(self.wv.hash2index) - self.old_hash2index_len,
self.vector_size),
dtype=REAL)
self.wv.syn0_vocab = vstack([self.wv.syn0_vocab, new_vocab_rows])
self.syn0_vocab_lockf = vstack([self.syn0_vocab_lockf, new_vocab_lockf_rows])
self.wv.syn0_ngrams = vstack([self.wv.syn0_ngrams, new_ngram_rows])
self.syn0_ngrams_lockf = vstack([self.syn0_ngrams_lockf, new_ngram_lockf_rows])
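    # A minimal sketch of the bucketing scheme implemented above (illustrative;
    # uses the `compute_ngrams` and `ft_hash` helpers imported at module top):
    #
    #   ngrams = compute_ngrams('night', 3, 6)             # '<ni', 'nig', 'igh', ...
    #   buckets = [ft_hash(ng) % 2000000 for ng in ngrams]
    #
    # Colliding ngrams land in the same bucket and thus share one vector row;
    # `hash2index` remaps the sparse bucket ids to dense row indices.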
def reset_ngram_weights(self):
"""Reset all projection weights to an initial (untrained) state,
but keep the existing vocabulary and their ngrams.
"""
rand_obj = np.random
rand_obj.seed(self.seed)
for index in range(len(self.wv.vocab)):
self.wv.syn0_vocab[index] = rand_obj.uniform(
-1.0 / self.vector_size, 1.0 / self.vector_size, self.vector_size
).astype(REAL)
for index in range(len(self.wv.hash2index)):
self.wv.syn0_ngrams[index] = rand_obj.uniform(
-1.0 / self.vector_size, 1.0 / self.vector_size, self.vector_size
).astype(REAL)
def _do_train_job(self, sentences, alpha, inits):
"""Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
Parameters
----------
sentences : iterable of iterables
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
alpha : float
The current learning rate.
inits : (:class:`numpy.ndarray`, :class:`numpy.ndarray`)
Each worker's private work memory.
Returns
-------
(int, int)
Tuple of (effective word count after ignoring unknown words and sentence length trimming, total word count)
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work, neu1)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1)
return tally, self._raw_word_count(sentences)
def train(self, sentences, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None,
word_count=0, queue_factor=2, report_delay=1.0):
"""Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For FastText, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
progress-percentage logging, either total_examples (count of sentences) or total_words (count of
raw words in sentences) **MUST** be provided (if the corpus is the same as was provided to
:meth:`~gensim.models.fasttext.FastText.build_vocab()`, the count of examples in that corpus
will be available in the model's :attr:`corpus_count` property).
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument **MUST** be provided. In the common and recommended case,
where :meth:`~gensim.models.fasttext.FastText.train()` is only called once,
the model's cached `iter` value should be supplied as `epochs` value.
Parameters
----------
sentences : iterable of iterables
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
total_examples : int
Count of sentences.
total_words : int
Count of raw words in sentences.
epochs : int
Number of iterations (epochs) over the corpus.
start_alpha : float
Initial learning rate.
end_alpha : float
Final learning rate. Drops linearly from `start_alpha`.
word_count : int
Count of words already trained. Set this to 0 for the usual
case of training on all words in sentences.
queue_factor : int
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float
Seconds to wait before reporting progress.
Examples
--------
>>> from gensim.models import FastText
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = FastText(min_count=1)
>>> model.build_vocab(sentences)
>>> model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
"""
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
        # forward the caller-supplied parameters instead of silently falling
        # back to the cached corpus_count/iter/alpha values
        Word2Vec.train(
            self, sentences, total_examples=total_examples, total_words=total_words,
            epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha,
            word_count=word_count, queue_factor=queue_factor, report_delay=report_delay)
self.get_vocab_word_vecs()
def __getitem__(self, word):
"""Get `word` representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
A single word whose vector needs to be returned.
Returns
-------
:class:`numpy.ndarray`
The word's representations in vector space, as a 1D numpy array.
Raises
------
KeyError
For words with all ngrams absent, a KeyError is raised.
Example
-------
>>> from gensim.models import FastText
>>> from gensim.test.utils import datapath
>>>
>>> trained_model = FastText.load_fasttext_format(datapath('lee_fasttext'))
        >>> hello_vector = trained_model['hello']  # get vector for an in- or out-of-vocab word
"""
return self.word_vec(word)
def get_vocab_word_vecs(self):
"""Calculate vectors for words in vocabulary and stores them in `wv.syn0`."""
for w, v in self.wv.vocab.items():
word_vec = np.copy(self.wv.syn0_vocab[v.index])
ngrams = self.wv.ngrams_word[w]
ngram_weights = self.wv.syn0_ngrams
for ngram in ngrams:
word_vec += ngram_weights[self.wv.ngrams[ngram]]
word_vec /= (len(ngrams) + 1)
self.wv.syn0[v.index] = word_vec
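    # In formula form (illustrative): for a word w with ngram set G(w),
    #
    #   v(w) = (v_vocab(w) + sum_{g in G(w)} v_ngram(g)) / (|G(w)| + 1)
    #
    # which is exactly the averaging performed in the loop above.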
def word_vec(self, word, use_norm=False):
"""Get the word's representations in vector space, as a 1D numpy array.
Parameters
----------
word : str
A single word whose vector needs to be returned.
use_norm : bool
If True, returns normalized vector.
Returns
-------
:class:`numpy.ndarray`
The word's representations in vector space, as a 1D numpy array.
Raises
------
KeyError
For words with all ngrams absent, a KeyError is raised.
Example
-------
>>> from gensim.models import FastText
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = FastText(sentences, min_count=1)
>>> meow_vector = model.word_vec('meow') # get vector for word
"""
return FastTextKeyedVectors.word_vec(self.wv, word, use_norm=use_norm)
@classmethod
def load_fasttext_format(cls, *args, **kwargs):
"""Load a :class:`~gensim.models.fasttext.FastText` model from a format compatible with
the original fasttext implementation.
Parameters
----------
fname : str
Path to the file.
"""
return Ft_Wrapper.load_fasttext_format(*args, **kwargs)
def save(self, *args, **kwargs):
"""Save the model. This saved model can be loaded again using :func:`~gensim.models.fasttext.FastText.load`,
which supports online training and getting vectors for out-of-vocabulary words.
Parameters
----------
fname : str
Path to the file.
"""
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'syn0_vocab_norm', 'syn0_ngrams_norm'])
super(FastText, self).save(*args, **kwargs)
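    # A usage sketch for persistence (illustrative; the file path is hypothetical):
    #
    #   >>> model = FastText(sentences, min_count=1)
    #   >>> model.save('/tmp/fasttext.model')
    #   >>> loaded = FastText.load('/tmp/fasttext.model')
    #   >>> loaded['say'].shape
    #   (100,)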
| markroxor/gensim | gensim/models/fasttext.py | Python | lgpl-2.1 | 29,942 |
"""Wrapper functions for Tcl/Tk.
Tkinter provides classes which allow the display, positioning and
control of widgets. Toplevel widgets are Tk and Toplevel. Other
widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
Checkbutton, Scale, Listbox, Scrollbar, OptionMenu, Spinbox,
LabelFrame and PanedWindow.
Properties of the widgets are specified with keyword arguments.
Keyword arguments have the same name as the corresponding resource
under Tk.
Widgets are positioned with one of the geometry managers Place, Pack
or Grid. These managers can be called with methods place, pack, grid
available in every Widget.
Actions are bound to events by resources (e.g. keyword argument
command) or with the method bind.
Example (Hello, World):
import tkinter
from tkinter.constants import *
tk = tkinter.Tk()
frame = tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
frame.pack(fill=BOTH,expand=1)
label = tkinter.Label(frame, text="Hello, World")
label.pack(fill=X, expand=1)
button = tkinter.Button(frame,text="Exit",command=tk.destroy)
button.pack(side=BOTTOM)
tk.mainloop()
"""
import enum
import sys
import _tkinter # If this fails your Python may not be configured for Tk
TclError = _tkinter.TclError
from tkinter.constants import *
import re
wantobjects = 1
TkVersion = float(_tkinter.TK_VERSION)
TclVersion = float(_tkinter.TCL_VERSION)
READABLE = _tkinter.READABLE
WRITABLE = _tkinter.WRITABLE
EXCEPTION = _tkinter.EXCEPTION
_magic_re = re.compile(r'([\\{}])')
_space_re = re.compile(r'([\s])', re.ASCII)
def _join(value):
"""Internal function."""
return ' '.join(map(_stringify, value))
def _stringify(value):
"""Internal function."""
if isinstance(value, (list, tuple)):
if len(value) == 1:
value = _stringify(value[0])
if _magic_re.search(value):
value = '{%s}' % value
else:
value = '{%s}' % _join(value)
else:
value = str(value)
if not value:
value = '{}'
elif _magic_re.search(value):
# add '\' before special characters and spaces
value = _magic_re.sub(r'\\\1', value)
value = value.replace('\n', r'\n')
value = _space_re.sub(r'\\\1', value)
if value[0] == '"':
value = '\\' + value
elif value[0] == '"' or _space_re.search(value):
value = '{%s}' % value
return value
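# Illustrative behavior of the helper above:
#
#   _stringify('hello world')  ->  '{hello world}'
#   _stringify(['a', 'b'])     ->  '{a b}'
#   _stringify('')             ->  '{}'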
def _flatten(seq):
"""Internal function."""
res = ()
for item in seq:
if isinstance(item, (tuple, list)):
res = res + _flatten(item)
elif item is not None:
res = res + (item,)
return res
try: _flatten = _tkinter._flatten
except AttributeError: pass
def _cnfmerge(cnfs):
"""Internal function."""
if isinstance(cnfs, dict):
return cnfs
elif isinstance(cnfs, (type(None), str)):
return cnfs
else:
cnf = {}
for c in _flatten(cnfs):
try:
cnf.update(c)
except (AttributeError, TypeError) as msg:
print("_cnfmerge: fallback due to:", msg)
for k, v in c.items():
cnf[k] = v
return cnf
try: _cnfmerge = _tkinter._cnfmerge
except AttributeError: pass
def _splitdict(tk, v, cut_minus=True, conv=None):
"""Return a properly formatted dict built from Tcl list pairs.
If cut_minus is True, the supposed '-' prefix will be removed from
keys. If conv is specified, it is used to convert values.
Tcl list is expected to contain an even number of elements.
"""
t = tk.splitlist(v)
if len(t) % 2:
raise RuntimeError('Tcl list representing a dict is expected '
'to contain an even number of elements')
it = iter(t)
dict = {}
for key, value in zip(it, it):
key = str(key)
if cut_minus and key[0] == '-':
key = key[1:]
if conv:
value = conv(value)
dict[key] = value
return dict
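# Illustrative call (requires a live Tcl interpreter such as Tk().tk; the
# option list shown is hypothetical):
#
#   _splitdict(tk, ('-height', '10', '-width', '20'), conv=int)
#   ->  {'height': 10, 'width': 20}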
class EventType(str, enum.Enum):
KeyPress = '2'
    Key = KeyPress
KeyRelease = '3'
ButtonPress = '4'
    Button = ButtonPress
ButtonRelease = '5'
Motion = '6'
Enter = '7'
Leave = '8'
FocusIn = '9'
FocusOut = '10'
Keymap = '11' # undocumented
Expose = '12'
GraphicsExpose = '13' # undocumented
NoExpose = '14' # undocumented
Visibility = '15'
Create = '16'
Destroy = '17'
Unmap = '18'
Map = '19'
MapRequest = '20'
Reparent = '21'
Configure = '22'
ConfigureRequest = '23'
Gravity = '24'
ResizeRequest = '25'
Circulate = '26'
CirculateRequest = '27'
Property = '28'
SelectionClear = '29' # undocumented
SelectionRequest = '30' # undocumented
Selection = '31' # undocumented
Colormap = '32'
ClientMessage = '33' # undocumented
Mapping = '34' # undocumented
    VirtualEvent = '35'  # undocumented
    Activate = '36'
    Deactivate = '37'
    MouseWheel = '38'
def __str__(self):
return self.name
class Event:
"""Container for the properties of an event.
Instances of this type are generated if one of the following events occurs:
KeyPress, KeyRelease - for keyboard events
ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
Colormap, Gravity, Reparent, Property, Destroy, Activate,
Deactivate - for window events.
If a callback function for one of these events is registered
using bind, bind_all, bind_class, or tag_bind, the callback is
called with an Event as first argument. It will have the
following attributes (in braces are the event types for which
the attribute is valid):
serial - serial number of event
num - mouse button pressed (ButtonPress, ButtonRelease)
focus - whether the window has the focus (Enter, Leave)
height - height of the exposed window (Configure, Expose)
width - width of the exposed window (Configure, Expose)
keycode - keycode of the pressed key (KeyPress, KeyRelease)
state - state of the event as a number (ButtonPress, ButtonRelease,
Enter, KeyPress, KeyRelease,
Leave, Motion)
state - state as a string (Visibility)
time - when the event occurred
x - x-position of the mouse
y - y-position of the mouse
x_root - x-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
y_root - y-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
char - pressed character (KeyPress, KeyRelease)
send_event - see X/Windows documentation
keysym - keysym of the event as a string (KeyPress, KeyRelease)
keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
type - type of the event as a number
widget - widget in which the event occurred
delta - delta of wheel movement (MouseWheel)
"""
def __repr__(self):
attrs = {k: v for k, v in self.__dict__.items() if v != '??'}
if not self.char:
del attrs['char']
elif self.char != '??':
attrs['char'] = repr(self.char)
if not getattr(self, 'send_event', True):
del attrs['send_event']
if self.state == 0:
del attrs['state']
elif isinstance(self.state, int):
state = self.state
mods = ('Shift', 'Lock', 'Control',
'Mod1', 'Mod2', 'Mod3', 'Mod4', 'Mod5',
'Button1', 'Button2', 'Button3', 'Button4', 'Button5')
s = []
for i, n in enumerate(mods):
if state & (1 << i):
s.append(n)
            state = state & ~((1 << len(mods)) - 1)
if state or not s:
s.append(hex(state))
attrs['state'] = '|'.join(s)
if self.delta == 0:
del attrs['delta']
# widget usually is known
# serial and time are not very interesting
# keysym_num duplicates keysym
# x_root and y_root mostly duplicate x and y
keys = ('send_event',
'state', 'keysym', 'keycode', 'char',
'num', 'delta', 'focus',
'x', 'y', 'width', 'height')
return '<%s event%s>' % (
self.type,
''.join(' %s=%s' % (k, attrs[k]) for k in keys if k in attrs)
)
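# A minimal sketch of receiving Event instances (requires a display; the
# handler below is illustrative):
#
#   root = Tk()
#   def on_key(event):
#       print(event.type, event.keysym, event.char)
#   root.bind('<KeyPress>', on_key)
#   root.mainloop()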
_support_default_root = 1
_default_root = None
def NoDefaultRoot():
"""Inhibit setting of default root window.
    Call this function to prevent the first instance of
    Tk from being used for windows without an explicit parent window.
"""
global _support_default_root
_support_default_root = 0
global _default_root
_default_root = None
del _default_root
def _tkerror(err):
"""Internal function."""
pass
def _exit(code=0):
"""Internal function. Calling it will raise the exception SystemExit."""
try:
code = int(code)
except ValueError:
pass
raise SystemExit(code)
_varnum = 0
class Variable:
"""Class to define value holders for e.g. buttons.
Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations
that constrain the type of the value returned from get()."""
_default = ""
_tk = None
_tclCommands = None
def __init__(self, master=None, value=None, name=None):
"""Construct a variable
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
# check for type of NAME parameter to override weird error message
# raised from Modules/_tkinter.c:SetVar like:
# TypeError: setvar() takes exactly 3 arguments (2 given)
if name is not None and not isinstance(name, str):
raise TypeError("name must be a string")
global _varnum
if not master:
master = _default_root
self._root = master._root()
self._tk = master.tk
if name:
self._name = name
else:
self._name = 'PY_VAR' + repr(_varnum)
_varnum += 1
if value is not None:
self.initialize(value)
elif not self._tk.getboolean(self._tk.call("info", "exists", self._name)):
self.initialize(self._default)
def __del__(self):
"""Unset the variable in Tcl."""
if self._tk is None:
return
if self._tk.getboolean(self._tk.call("info", "exists", self._name)):
self._tk.globalunsetvar(self._name)
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self._tk.deletecommand(name)
self._tclCommands = None
def __str__(self):
"""Return the name of the variable in Tcl."""
return self._name
def set(self, value):
"""Set the variable to VALUE."""
return self._tk.globalsetvar(self._name, value)
initialize = set
def get(self):
"""Return value of variable."""
return self._tk.globalgetvar(self._name)
def _register(self, callback):
f = CallWrapper(callback, None, self._root).__call__
cbname = repr(id(f))
try:
callback = callback.__func__
except AttributeError:
pass
try:
cbname = cbname + callback.__name__
except AttributeError:
pass
self._tk.createcommand(cbname, f)
if self._tclCommands is None:
self._tclCommands = []
self._tclCommands.append(cbname)
return cbname
def trace_add(self, mode, callback):
"""Define a trace callback for the variable.
Mode is one of "read", "write", "unset", or a list or tuple of
such strings.
Callback must be a function which is called when the variable is
read, written or unset.
Return the name of the callback.
"""
cbname = self._register(callback)
self._tk.call('trace', 'add', 'variable',
self._name, mode, (cbname,))
return cbname
def trace_remove(self, mode, cbname):
"""Delete the trace callback for a variable.
Mode is one of "read", "write", "unset" or a list or tuple of
        such strings. Must be the same as was specified in trace_add().
cbname is the name of the callback returned from trace_add().
"""
self._tk.call('trace', 'remove', 'variable',
self._name, mode, cbname)
for m, ca in self.trace_info():
if self._tk.splitlist(ca)[0] == cbname:
break
else:
self._tk.deletecommand(cbname)
try:
self._tclCommands.remove(cbname)
except ValueError:
pass
def trace_info(self):
"""Return all trace callback information."""
splitlist = self._tk.splitlist
return [(splitlist(k), v) for k, v in map(splitlist,
splitlist(self._tk.call('trace', 'info', 'variable', self._name)))]
def trace_variable(self, mode, callback):
"""Define a trace callback for the variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CALLBACK must be a function which is called when
the variable is read, written or undefined.
Return the name of the callback.
This deprecated method wraps a deprecated Tcl method that will
likely be removed in the future. Use trace_add() instead.
"""
# TODO: Add deprecation warning
cbname = self._register(callback)
self._tk.call("trace", "variable", self._name, mode, cbname)
return cbname
trace = trace_variable
def trace_vdelete(self, mode, cbname):
"""Delete the trace callback for a variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CBNAME is the name of the callback returned from trace_variable or trace.
This deprecated method wraps a deprecated Tcl method that will
likely be removed in the future. Use trace_remove() instead.
"""
# TODO: Add deprecation warning
self._tk.call("trace", "vdelete", self._name, mode, cbname)
cbname = self._tk.splitlist(cbname)[0]
for m, ca in self.trace_info():
if self._tk.splitlist(ca)[0] == cbname:
break
else:
self._tk.deletecommand(cbname)
try:
self._tclCommands.remove(cbname)
except ValueError:
pass
def trace_vinfo(self):
"""Return all trace callback information.
This deprecated method wraps a deprecated Tcl method that will
likely be removed in the future. Use trace_info() instead.
"""
# TODO: Add deprecation warning
return [self._tk.splitlist(x) for x in self._tk.splitlist(
self._tk.call("trace", "vinfo", self._name))]
def __eq__(self, other):
"""Comparison for equality (==).
Note: if the Variable's master matters to behavior
also compare self._master == other._master
"""
return self.__class__.__name__ == other.__class__.__name__ \
and self._name == other._name
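# A short sketch of the trace API defined above (requires a Tk root):
#
#   root = Tk()
#   var = Variable(root)
#   cbname = var.trace_add('write', lambda *args: print('set to', var.get()))
#   var.set(42)                        # fires the callback
#   var.trace_remove('write', cbname)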
class StringVar(Variable):
"""Value holder for strings variables."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a string variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return value of variable as string."""
value = self._tk.globalgetvar(self._name)
if isinstance(value, str):
return value
return str(value)
class IntVar(Variable):
"""Value holder for integer variables."""
_default = 0
def __init__(self, master=None, value=None, name=None):
"""Construct an integer variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as an integer."""
value = self._tk.globalgetvar(self._name)
try:
return self._tk.getint(value)
except (TypeError, TclError):
return int(self._tk.getdouble(value))
class DoubleVar(Variable):
"""Value holder for float variables."""
_default = 0.0
def __init__(self, master=None, value=None, name=None):
"""Construct a float variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0.0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a float."""
return self._tk.getdouble(self._tk.globalgetvar(self._name))
class BooleanVar(Variable):
"""Value holder for boolean variables."""
_default = False
def __init__(self, master=None, value=None, name=None):
"""Construct a boolean variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to False)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def set(self, value):
"""Set the variable to VALUE."""
return self._tk.globalsetvar(self._name, self._tk.getboolean(value))
initialize = set
def get(self):
"""Return the value of the variable as a bool."""
try:
return self._tk.getboolean(self._tk.globalgetvar(self._name))
except TclError:
raise ValueError("invalid literal for getboolean()")
def mainloop(n=0):
"""Run the main loop of Tcl."""
_default_root.tk.mainloop(n)
getint = int
getdouble = float
def getboolean(s):
"""Convert true and false to integer values 1 and 0."""
try:
return _default_root.tk.getboolean(s)
except TclError:
raise ValueError("invalid literal for getboolean()")
# Methods defined on both toplevel and interior widgets
class Misc:
"""Internal class.
Base class which defines methods common for interior widgets."""
# used for generating child widget names
_last_child_ids = None
# XXX font command?
_tclCommands = None
def destroy(self):
"""Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter."""
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
self._tclCommands = None
def deletecommand(self, name):
"""Internal function.
Delete the Tcl command provided in NAME."""
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
try:
self._tclCommands.remove(name)
except ValueError:
pass
def tk_strictMotif(self, boolean=None):
"""Set Tcl internal variable, whether the look and feel
should adhere to Motif.
A parameter of 1 means adhere to Motif (e.g. no color
change if mouse passes over slider).
Returns the set value."""
return self.tk.getboolean(self.tk.call(
'set', 'tk_strictMotif', boolean))
def tk_bisque(self):
"""Change the color scheme to light brown as used in Tk 3.6 and before."""
self.tk.call('tk_bisque')
def tk_setPalette(self, *args, **kw):
"""Set a new color scheme for all widget elements.
        If a single color is given as argument, all colors of Tk
        widget elements are derived from it.
Alternatively several keyword parameters and its associated
colors can be given. The following keywords are valid:
activeBackground, foreground, selectColor,
activeForeground, highlightBackground, selectBackground,
background, highlightColor, selectForeground,
disabledForeground, insertBackground, troughColor."""
self.tk.call(('tk_setPalette',)
+ _flatten(args) + _flatten(list(kw.items())))
def wait_variable(self, name='PY_VAR'):
"""Wait until the variable is modified.
A parameter of type IntVar, StringVar, DoubleVar or
BooleanVar must be given."""
self.tk.call('tkwait', 'variable', name)
waitvar = wait_variable # XXX b/w compat
def wait_window(self, window=None):
"""Wait until a WIDGET is destroyed.
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'window', window._w)
def wait_visibility(self, window=None):
"""Wait until the visibility of a WIDGET changes
(e.g. it appears).
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'visibility', window._w)
def setvar(self, name='PY_VAR', value='1'):
"""Set Tcl variable NAME to VALUE."""
self.tk.setvar(name, value)
def getvar(self, name='PY_VAR'):
"""Return value of Tcl variable NAME."""
return self.tk.getvar(name)
def getint(self, s):
try:
return self.tk.getint(s)
except TclError as exc:
raise ValueError(str(exc))
def getdouble(self, s):
try:
return self.tk.getdouble(s)
except TclError as exc:
raise ValueError(str(exc))
def getboolean(self, s):
"""Return a boolean value for Tcl boolean values true and false given as parameter."""
try:
return self.tk.getboolean(s)
except TclError:
raise ValueError("invalid literal for getboolean()")
def focus_set(self):
"""Direct input focus to this widget.
If the application currently does not have the focus
this widget will get the focus if the application gets
the focus through the window manager."""
self.tk.call('focus', self._w)
focus = focus_set # XXX b/w compat?
def focus_force(self):
"""Direct input focus to this widget even if the
application does not have the focus. Use with
caution!"""
self.tk.call('focus', '-force', self._w)
def focus_get(self):
"""Return the widget which has currently the focus in the
application.
Use focus_displayof to allow working with several
displays. Return None if application does not have
the focus."""
name = self.tk.call('focus')
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_displayof(self):
"""Return the widget which has currently the focus on the
display where this widget is located.
Return None if the application does not have the focus."""
name = self.tk.call('focus', '-displayof', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_lastfor(self):
"""Return the widget which would have the focus if top level
for this widget gets the focus from the window manager."""
name = self.tk.call('focus', '-lastfor', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def tk_focusFollowsMouse(self):
"""The widget under mouse will get automatically focus. Can not
be disabled easily."""
self.tk.call('tk_focusFollowsMouse')
def tk_focusNext(self):
"""Return the next widget in the focus order which follows
widget which has currently the focus.
The focus order first goes to the next child, then to
the children of the child recursively and then to the
next sibling which is higher in the stacking order. A
widget is omitted if it has the takefocus resource set
to 0."""
name = self.tk.call('tk_focusNext', self._w)
if not name: return None
return self._nametowidget(name)
def tk_focusPrev(self):
"""Return previous widget in the focus order. See tk_focusNext for details."""
name = self.tk.call('tk_focusPrev', self._w)
if not name: return None
return self._nametowidget(name)
def after(self, ms, func=None, *args):
"""Call function once after given time.
MS specifies the time in milliseconds. FUNC gives the
function which shall be called. Additional parameters
are given as parameters to the function call. Return
identifier to cancel scheduling with after_cancel."""
if not func:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
return None
else:
def callit():
try:
func(*args)
finally:
try:
self.deletecommand(name)
except TclError:
pass
callit.__name__ = func.__name__
name = self._register(callit)
return self.tk.call('after', ms, name)
def after_idle(self, func, *args):
"""Call FUNC once if the Tcl main loop has no event to
process.
Return an identifier to cancel the scheduling with
after_cancel."""
return self.after('idle', func, *args)
def after_cancel(self, id):
"""Cancel scheduling of function identified with ID.
Identifier returned by after or after_idle must be
given as first parameter.
"""
if not id:
raise ValueError('id must be a valid identifier returned from '
'after or after_idle')
try:
data = self.tk.call('after', 'info', id)
script = self.tk.splitlist(data)[0]
self.deletecommand(script)
except TclError:
pass
self.tk.call('after', 'cancel', id)
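    # Sketch of the scheduling API above (assumes `w` is a live widget):
    #
    #   job = w.after(1000, lambda: print('tick'))   # run once after 1 second
    #   w.after_cancel(job)                          # or cancel before it fires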
def bell(self, displayof=0):
"""Ring a display's bell."""
self.tk.call(('bell',) + self._displayof(displayof))
# Clipboard handling:
def clipboard_get(self, **kw):
"""Retrieve data from the clipboard on window's display.
The window keyword defaults to the root window of the Tkinter
application.
The type keyword specifies the form in which the data is
to be returned and should be an atom name such as STRING
or FILE_NAME. Type defaults to STRING, except on X11, where the default
is to try UTF8_STRING and fall back to STRING.
This command is equivalent to:
selection_get(CLIPBOARD)
"""
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('clipboard', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('clipboard', 'get') + self._options(kw))
def clipboard_clear(self, **kw):
"""Clear the data in the Tk clipboard.
A widget specified for the optional displayof keyword
argument specifies the target display."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'clear') + self._options(kw))
def clipboard_append(self, string, **kw):
"""Append STRING to the Tk clipboard.
A widget specified at the optional displayof keyword
argument specifies the target display. The clipboard
can be retrieved with selection_get."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'append') + self._options(kw)
+ ('--', string))
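    # Sketch: round-tripping text through the clipboard (requires a display;
    # `w` is any widget):
    #
    #   w.clipboard_clear()
    #   w.clipboard_append('hello')
    #   w.clipboard_get()        # -> 'hello'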
# XXX grab current w/o window argument
def grab_current(self):
"""Return widget which has currently the grab in this application
or None."""
name = self.tk.call('grab', 'current', self._w)
if not name: return None
return self._nametowidget(name)
def grab_release(self):
"""Release grab for this widget if currently set."""
self.tk.call('grab', 'release', self._w)
def grab_set(self):
"""Set grab for this widget.
A grab directs all events to this and descendant
widgets in the application."""
self.tk.call('grab', 'set', self._w)
def grab_set_global(self):
"""Set global grab for this widget.
A global grab directs all events to this and
descendant widgets on the display. Use with caution -
other applications do not get events anymore."""
self.tk.call('grab', 'set', '-global', self._w)
def grab_status(self):
"""Return None, "local" or "global" if this widget has
no, a local or a global grab."""
status = self.tk.call('grab', 'status', self._w)
if status == 'none': status = None
return status
def option_add(self, pattern, value, priority = None):
"""Set a VALUE (second parameter) for an option
PATTERN (first parameter).
An optional third parameter gives the numeric priority
(defaults to 80)."""
self.tk.call('option', 'add', pattern, value, priority)
def option_clear(self):
"""Clear the option database.
It will be reloaded if option_add is called."""
self.tk.call('option', 'clear')
def option_get(self, name, className):
"""Return the value for an option NAME for this widget
with CLASSNAME.
Values with higher priority override lower values."""
return self.tk.call('option', 'get', self._w, name, className)
def option_readfile(self, fileName, priority = None):
"""Read file FILENAME into the option database.
An optional second parameter gives the numeric
priority."""
self.tk.call('option', 'readfile', fileName, priority)
def selection_clear(self, **kw):
"""Clear the current X selection."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('selection', 'clear') + self._options(kw))
def selection_get(self, **kw):
"""Return the contents of the current X selection.
A keyword parameter selection specifies the name of
the selection and defaults to PRIMARY. A keyword
parameter displayof specifies a widget on the display
to use. A keyword parameter type specifies the form of data to be
fetched, defaulting to STRING except on X11, where UTF8_STRING is tried
before STRING."""
if 'displayof' not in kw: kw['displayof'] = self._w
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('selection', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('selection', 'get') + self._options(kw))
def selection_handle(self, command, **kw):
"""Specify a function COMMAND to call if the X
selection owned by this widget is queried by another
application.
This function must return the contents of the
selection. The function will be called with the
arguments OFFSET and LENGTH which allows the chunking
of very long selections. The following keyword
parameters can be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
name = self._register(command)
self.tk.call(('selection', 'handle') + self._options(kw)
+ (self._w, name))
def selection_own(self, **kw):
"""Become owner of X selection.
A keyword parameter selection specifies the name of
the selection (default PRIMARY)."""
self.tk.call(('selection', 'own') +
self._options(kw) + (self._w,))
def selection_own_get(self, **kw):
"""Return owner of X selection.
The following keyword parameter can
be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
if 'displayof' not in kw: kw['displayof'] = self._w
name = self.tk.call(('selection', 'own') + self._options(kw))
if not name: return None
return self._nametowidget(name)
def send(self, interp, cmd, *args):
"""Send Tcl command CMD to different interpreter INTERP to be executed."""
return self.tk.call(('send', interp, cmd) + args)
def lower(self, belowThis=None):
"""Lower this widget in the stacking order."""
self.tk.call('lower', self._w, belowThis)
def tkraise(self, aboveThis=None):
"""Raise this widget in the stacking order."""
self.tk.call('raise', self._w, aboveThis)
lift = tkraise
def winfo_atom(self, name, displayof=0):
"""Return integer which represents atom NAME."""
args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
return self.tk.getint(self.tk.call(args))
def winfo_atomname(self, id, displayof=0):
"""Return name of atom with identifier ID."""
args = ('winfo', 'atomname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_cells(self):
"""Return number of cells in the colormap for this widget."""
return self.tk.getint(
self.tk.call('winfo', 'cells', self._w))
def winfo_children(self):
"""Return a list of all widgets which are children of this widget."""
result = []
for child in self.tk.splitlist(
self.tk.call('winfo', 'children', self._w)):
try:
# Tcl sometimes returns extra windows, e.g. for
# menus; those need to be skipped
result.append(self._nametowidget(child))
except KeyError:
pass
return result
def winfo_class(self):
"""Return window class name of this widget."""
return self.tk.call('winfo', 'class', self._w)
def winfo_colormapfull(self):
"""Return True if at the last color request the colormap was full."""
return self.tk.getboolean(
self.tk.call('winfo', 'colormapfull', self._w))
def winfo_containing(self, rootX, rootY, displayof=0):
"""Return the widget which is at the root coordinates ROOTX, ROOTY."""
args = ('winfo', 'containing') \
+ self._displayof(displayof) + (rootX, rootY)
name = self.tk.call(args)
if not name: return None
return self._nametowidget(name)
def winfo_depth(self):
"""Return the number of bits per pixel."""
return self.tk.getint(self.tk.call('winfo', 'depth', self._w))
def winfo_exists(self):
"""Return true if this widget exists."""
return self.tk.getint(
self.tk.call('winfo', 'exists', self._w))
def winfo_fpixels(self, number):
"""Return the number of pixels for the given distance NUMBER
(e.g. "3c") as float."""
return self.tk.getdouble(self.tk.call(
'winfo', 'fpixels', self._w, number))
def winfo_geometry(self):
"""Return geometry string for this widget in the form "widthxheight+X+Y"."""
return self.tk.call('winfo', 'geometry', self._w)
def winfo_height(self):
"""Return height of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'height', self._w))
def winfo_id(self):
"""Return identifier ID for this widget."""
return int(self.tk.call('winfo', 'id', self._w), 0)
def winfo_interps(self, displayof=0):
"""Return the name of all Tcl interpreters for this display."""
args = ('winfo', 'interps') + self._displayof(displayof)
return self.tk.splitlist(self.tk.call(args))
def winfo_ismapped(self):
"""Return true if this widget is mapped."""
return self.tk.getint(
self.tk.call('winfo', 'ismapped', self._w))
def winfo_manager(self):
"""Return the window manager name for this widget."""
return self.tk.call('winfo', 'manager', self._w)
def winfo_name(self):
"""Return the name of this widget."""
return self.tk.call('winfo', 'name', self._w)
def winfo_parent(self):
"""Return the name of the parent of this widget."""
return self.tk.call('winfo', 'parent', self._w)
def winfo_pathname(self, id, displayof=0):
"""Return the pathname of the widget given by ID."""
args = ('winfo', 'pathname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_pixels(self, number):
"""Rounded integer value of winfo_fpixels."""
return self.tk.getint(
self.tk.call('winfo', 'pixels', self._w, number))
def winfo_pointerx(self):
"""Return the x coordinate of the pointer on the root window."""
return self.tk.getint(
self.tk.call('winfo', 'pointerx', self._w))
def winfo_pointerxy(self):
"""Return a tuple of x and y coordinates of the pointer on the root window."""
return self._getints(
self.tk.call('winfo', 'pointerxy', self._w))
def winfo_pointery(self):
"""Return the y coordinate of the pointer on the root window."""
return self.tk.getint(
self.tk.call('winfo', 'pointery', self._w))
def winfo_reqheight(self):
"""Return requested height of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'reqheight', self._w))
def winfo_reqwidth(self):
"""Return requested width of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'reqwidth', self._w))
def winfo_rgb(self, color):
"""Return tuple of decimal values for red, green, blue for
COLOR in this widget."""
return self._getints(
self.tk.call('winfo', 'rgb', self._w, color))
def winfo_rootx(self):
"""Return x coordinate of upper left corner of this widget on the
root window."""
return self.tk.getint(
self.tk.call('winfo', 'rootx', self._w))
def winfo_rooty(self):
"""Return y coordinate of upper left corner of this widget on the
root window."""
return self.tk.getint(
self.tk.call('winfo', 'rooty', self._w))
def winfo_screen(self):
"""Return the screen name of this widget."""
return self.tk.call('winfo', 'screen', self._w)
def winfo_screencells(self):
"""Return the number of the cells in the colormap of the screen
of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'screencells', self._w))
def winfo_screendepth(self):
"""Return the number of bits per pixel of the root window of the
screen of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'screendepth', self._w))
def winfo_screenheight(self):
"""Return the number of pixels of the height of the screen of this widget
in pixel."""
return self.tk.getint(
self.tk.call('winfo', 'screenheight', self._w))
def winfo_screenmmheight(self):
"""Return the number of pixels of the height of the screen of
this widget in mm."""
return self.tk.getint(
self.tk.call('winfo', 'screenmmheight', self._w))
def winfo_screenmmwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in mm."""
return self.tk.getint(
self.tk.call('winfo', 'screenmmwidth', self._w))
def winfo_screenvisual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the default
colormodel of this screen."""
return self.tk.call('winfo', 'screenvisual', self._w)
def winfo_screenwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in pixel."""
return self.tk.getint(
self.tk.call('winfo', 'screenwidth', self._w))
def winfo_server(self):
"""Return information of the X-Server of the screen of this widget in
the form "XmajorRminor vendor vendorVersion"."""
return self.tk.call('winfo', 'server', self._w)
def winfo_toplevel(self):
"""Return the toplevel widget of this widget."""
return self._nametowidget(self.tk.call(
'winfo', 'toplevel', self._w))
def winfo_viewable(self):
"""Return true if the widget and all its higher ancestors are mapped."""
return self.tk.getint(
self.tk.call('winfo', 'viewable', self._w))
def winfo_visual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the
colormodel of this widget."""
return self.tk.call('winfo', 'visual', self._w)
def winfo_visualid(self):
"""Return the X identifier for the visual for this widget."""
return self.tk.call('winfo', 'visualid', self._w)
def winfo_visualsavailable(self, includeids=False):
"""Return a list of all visuals available for the screen
of this widget.
        Each item in the list consists of a visual name (see winfo_visual), a
        depth and, if includeids is true, the X identifier."""
data = self.tk.call('winfo', 'visualsavailable', self._w,
'includeids' if includeids else None)
data = [self.tk.splitlist(x) for x in self.tk.splitlist(data)]
return [self.__winfo_parseitem(x) for x in data]
def __winfo_parseitem(self, t):
"""Internal function."""
return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
def __winfo_getint(self, x):
"""Internal function."""
return int(x, 0)
def winfo_vrootheight(self):
"""Return the height of the virtual root window associated with this
widget in pixels. If there is no virtual root window return the
height of the screen."""
return self.tk.getint(
self.tk.call('winfo', 'vrootheight', self._w))
def winfo_vrootwidth(self):
"""Return the width of the virtual root window associated with this
widget in pixel. If there is no virtual root window return the
width of the screen."""
return self.tk.getint(
self.tk.call('winfo', 'vrootwidth', self._w))
def winfo_vrootx(self):
"""Return the x offset of the virtual root relative to the root
window of the screen of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'vrootx', self._w))
def winfo_vrooty(self):
"""Return the y offset of the virtual root relative to the root
window of the screen of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'vrooty', self._w))
def winfo_width(self):
"""Return the width of this widget."""
return self.tk.getint(
self.tk.call('winfo', 'width', self._w))
def winfo_x(self):
"""Return the x coordinate of the upper left corner of this widget
in the parent."""
return self.tk.getint(
self.tk.call('winfo', 'x', self._w))
def winfo_y(self):
"""Return the y coordinate of the upper left corner of this widget
in the parent."""
return self.tk.getint(
self.tk.call('winfo', 'y', self._w))
def update(self):
"""Enter event loop until all pending events have been processed by Tcl."""
self.tk.call('update')
def update_idletasks(self):
"""Enter event loop until all idle callbacks have been called. This
will update the display of windows but not process events caused by
the user."""
self.tk.call('update', 'idletasks')
def bindtags(self, tagList=None):
"""Set or get the list of bindtags for this widget.
With no argument return the list of all bindtags associated with
this widget. With a list of strings as argument the bindtags are
set to this list. The bindtags determine in which order events are
processed (see bind)."""
if tagList is None:
return self.tk.splitlist(
self.tk.call('bindtags', self._w))
else:
self.tk.call('bindtags', self._w, tagList)
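    # Illustrative sketch (editor's example): swapping the first two
    # bindtags makes class bindings fire before instance bindings.
    #   entry = Entry(root)
    #   tags = list(entry.bindtags())       # (widget, class, toplevel, 'all')
    #   entry.bindtags((tags[1], tags[0]) + tuple(tags[2:]))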
def _bind(self, what, sequence, func, add, needcleanup=1):
"""Internal function."""
if isinstance(func, str):
self.tk.call(what + (sequence, func))
elif func:
funcid = self._register(func, self._substitute,
needcleanup)
cmd = ('%sif {"[%s %s]" == "break"} break\n'
%
(add and '+' or '',
funcid, self._subst_format_str))
self.tk.call(what + (sequence, cmd))
return funcid
elif sequence:
return self.tk.call(what + (sequence,))
else:
return self.tk.splitlist(self.tk.call(what))
def bind(self, sequence=None, func=None, add=None):
"""Bind to this widget at event SEQUENCE a call to function FUNC.
SEQUENCE is a string of concatenated event
patterns. An event pattern is of the form
<MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
        Button1, B1, Mod5, M5, Button2, B2, Meta, M, Button3,
        B3, Alt, Button4, B4, Double, Button5, B5, Triple,
        Mod1, M1. TYPE is one of Activate, Enter, Map,
        ButtonPress, Button, Expose, Motion, ButtonRelease,
        FocusIn, MouseWheel, Circulate, FocusOut, Property,
        Colormap, Gravity, Reparent, Configure, KeyPress, Key,
        Unmap, Deactivate, KeyRelease, Visibility, Destroy,
        Leave and DETAIL is the button number for ButtonPress,
ButtonRelease and DETAIL is the Keysym for KeyPress and
KeyRelease. Examples are
<Control-Button-1> for pressing Control and mouse button 1 or
<Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
An event pattern can also be a virtual event of the form
<<AString>> where AString can be arbitrary. This
event can be generated by event_generate.
If events are concatenated they must appear shortly
after each other.
FUNC will be called if the event sequence occurs with an
instance of Event as argument. If the return value of FUNC is
"break" no further bound function is invoked.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function.
Bind will return an identifier to allow deletion of the bound function with
unbind without memory leak.
If FUNC or SEQUENCE is omitted the bound function or list
of bound events are returned."""
return self._bind(('bind', self._w), sequence, func, add)
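    # Illustrative sketch (editor's example) of the patterns described
    # above; returning "break" from the handler stops further bindings.
    #   def on_key(event):
    #       print('pressed', event.keysym, 'at', event.x, event.y)
    #   widget.bind('<Key>', on_key)
    #   widget.bind('<Control-Button-1>', lambda e: print('ctrl-click'))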
def unbind(self, sequence, funcid=None):
"""Unbind for this widget for event SEQUENCE the
function identified with FUNCID."""
self.tk.call('bind', self._w, sequence, '')
if funcid:
self.deletecommand(funcid)
def bind_all(self, sequence=None, func=None, add=None):
"""Bind to all widgets at an event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function. See bind for the return value."""
return self._bind(('bind', 'all'), sequence, func, add, 0)
def unbind_all(self, sequence):
"""Unbind for all widgets for event SEQUENCE all functions."""
        self.tk.call('bind', 'all', sequence, '')
def bind_class(self, className, sequence=None, func=None, add=None):
"""Bind to widgets with bindtag CLASSNAME at event
SEQUENCE a call of function FUNC. An additional
boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or
whether it will replace the previous function. See bind for
the return value."""
return self._bind(('bind', className), sequence, func, add, 0)
def unbind_class(self, className, sequence):
"""Unbind for all widgets with bindtag CLASSNAME for event SEQUENCE
all functions."""
        self.tk.call('bind', className, sequence, '')
def mainloop(self, n=0):
"""Call the mainloop of Tk."""
self.tk.mainloop(n)
def quit(self):
"""Quit the Tcl interpreter. All widgets will be destroyed."""
self.tk.quit()
def _getints(self, string):
"""Internal function."""
if string:
return tuple(map(self.tk.getint, self.tk.splitlist(string)))
def _getdoubles(self, string):
"""Internal function."""
if string:
return tuple(map(self.tk.getdouble, self.tk.splitlist(string)))
def _getboolean(self, string):
"""Internal function."""
if string:
return self.tk.getboolean(string)
def _displayof(self, displayof):
"""Internal function."""
if displayof:
return ('-displayof', displayof)
if displayof is None:
return ('-displayof', self._w)
return ()
@property
def _windowingsystem(self):
"""Internal function."""
try:
return self._root()._windowingsystem_cached
except AttributeError:
ws = self._root()._windowingsystem_cached = \
self.tk.call('tk', 'windowingsystem')
return ws
def _options(self, cnf, kw = None):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
res = ()
for k, v in cnf.items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if callable(v):
v = self._register(v)
elif isinstance(v, (tuple, list)):
nv = []
for item in v:
if isinstance(item, int):
nv.append(str(item))
elif isinstance(item, str):
nv.append(_stringify(item))
else:
break
else:
v = ' '.join(nv)
res = res + ('-'+k, v)
return res
def nametowidget(self, name):
"""Return the Tkinter instance of a widget identified by
its Tcl name NAME."""
name = str(name).split('.')
w = self
if not name[0]:
w = w._root()
name = name[1:]
for n in name:
if not n:
break
w = w.children[n]
return w
_nametowidget = nametowidget
def _register(self, func, subst=None, needcleanup=1):
"""Return a newly created Tcl function. If this
function is called, the Python function FUNC will
be executed. An optional function SUBST can
be given which will be executed before FUNC."""
f = CallWrapper(func, subst, self).__call__
name = repr(id(f))
try:
func = func.__func__
except AttributeError:
pass
try:
name = name + func.__name__
except AttributeError:
pass
self.tk.createcommand(name, f)
if needcleanup:
if self._tclCommands is None:
self._tclCommands = []
self._tclCommands.append(name)
return name
register = _register
def _root(self):
"""Internal function."""
w = self
while w.master: w = w.master
return w
_subst_format = ('%#', '%b', '%f', '%h', '%k',
'%s', '%t', '%w', '%x', '%y',
'%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
_subst_format_str = " ".join(_subst_format)
def _substitute(self, *args):
"""Internal function."""
if len(args) != len(self._subst_format): return args
getboolean = self.tk.getboolean
getint = self.tk.getint
def getint_event(s):
"""Tk changed behavior in 8.4.2, returning "??" rather more often."""
try:
return getint(s)
except (ValueError, TclError):
return s
nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
# Missing: (a, c, d, m, o, v, B, R)
e = Event()
# serial field: valid for all events
# number of button: ButtonPress and ButtonRelease events only
# height field: Configure, ConfigureRequest, Create,
# ResizeRequest, and Expose events only
# keycode field: KeyPress and KeyRelease events only
# time field: "valid for events that contain a time field"
# width field: Configure, ConfigureRequest, Create, ResizeRequest,
# and Expose events only
# x field: "valid for events that contain an x field"
# y field: "valid for events that contain a y field"
# keysym as decimal: KeyPress and KeyRelease events only
# x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
# KeyRelease, and Motion events
e.serial = getint(nsign)
e.num = getint_event(b)
try: e.focus = getboolean(f)
except TclError: pass
e.height = getint_event(h)
e.keycode = getint_event(k)
e.state = getint_event(s)
e.time = getint_event(t)
e.width = getint_event(w)
e.x = getint_event(x)
e.y = getint_event(y)
e.char = A
try: e.send_event = getboolean(E)
except TclError: pass
e.keysym = K
e.keysym_num = getint_event(N)
try:
e.type = EventType(T)
except ValueError:
e.type = T
try:
e.widget = self._nametowidget(W)
except KeyError:
e.widget = W
e.x_root = getint_event(X)
e.y_root = getint_event(Y)
try:
e.delta = getint(D)
except (ValueError, TclError):
e.delta = 0
return (e,)
def _report_exception(self):
"""Internal function."""
exc, val, tb = sys.exc_info()
root = self._root()
root.report_callback_exception(exc, val, tb)
def _getconfigure(self, *args):
"""Call Tcl configure command and return the result as a dict."""
cnf = {}
for x in self.tk.splitlist(self.tk.call(*args)):
x = self.tk.splitlist(x)
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
def _getconfigure1(self, *args):
x = self.tk.splitlist(self.tk.call(*args))
return (x[0][1:],) + x[1:]
def _configure(self, cmd, cnf, kw):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
return self._getconfigure(_flatten((self._w, cmd)))
if isinstance(cnf, str):
return self._getconfigure1(_flatten((self._w, cmd, '-'+cnf)))
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
# These used to be defined in Widget:
def configure(self, cnf=None, **kw):
"""Configure resources of a widget.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method keys.
"""
return self._configure('configure', cnf, kw)
config = configure
def cget(self, key):
"""Return the resource value for a KEY given as string."""
return self.tk.call(self._w, 'cget', '-' + key)
__getitem__ = cget
def __setitem__(self, key, value):
self.configure({key: value})
def keys(self):
"""Return a list of all resource names of this widget."""
splitlist = self.tk.splitlist
return [splitlist(x)[0][1:] for x in
splitlist(self.tk.call(self._w, 'configure'))]
def __str__(self):
"""Return the window path name of this widget."""
return self._w
def __repr__(self):
return '<%s.%s object %s>' % (
self.__class__.__module__, self.__class__.__qualname__, self._w)
# Pack methods that apply to the master
_noarg_ = ['_noarg_']
def pack_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'pack', 'propagate', self._w))
else:
self.tk.call('pack', 'propagate', self._w, flag)
propagate = pack_propagate
def pack_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return [self._nametowidget(x) for x in
self.tk.splitlist(
self.tk.call('pack', 'slaves', self._w))]
slaves = pack_slaves
# Place method that applies to the master
def place_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return [self._nametowidget(x) for x in
self.tk.splitlist(
self.tk.call(
'place', 'slaves', self._w))]
# Grid methods that apply to the master
def grid_anchor(self, anchor=None): # new in Tk 8.5
"""The anchor value controls how to place the grid within the
master when no row/column has any weight.
The default anchor is nw."""
self.tk.call('grid', 'anchor', self._w, anchor)
anchor = grid_anchor
def grid_bbox(self, column=None, row=None, col2=None, row2=None):
"""Return a tuple of integer coordinates for the bounding
box of this widget controlled by the geometry manager grid.
If COLUMN, ROW is given the bounding box applies from
the cell with row and column 0 to the specified
cell. If COL2 and ROW2 are given the bounding box
starts at that cell.
The returned integers specify the offset of the upper left
corner in the master widget and the width and height.
"""
args = ('grid', 'bbox', self._w)
if column is not None and row is not None:
args = args + (column, row)
if col2 is not None and row2 is not None:
args = args + (col2, row2)
return self._getints(self.tk.call(*args)) or None
bbox = grid_bbox
def _gridconvvalue(self, value):
if isinstance(value, (str, _tkinter.Tcl_Obj)):
try:
svalue = str(value)
if not svalue:
return None
elif '.' in svalue:
return self.tk.getdouble(svalue)
else:
return self.tk.getint(svalue)
except (ValueError, TclError):
pass
return value
def _grid_configure(self, command, index, cnf, kw):
"""Internal function."""
if isinstance(cnf, str) and not kw:
if cnf[-1:] == '_':
cnf = cnf[:-1]
if cnf[:1] != '-':
cnf = '-'+cnf
options = (cnf,)
else:
options = self._options(cnf, kw)
if not options:
return _splitdict(
self.tk,
self.tk.call('grid', command, self._w, index),
conv=self._gridconvvalue)
res = self.tk.call(
('grid', command, self._w, index)
+ options)
if len(options) == 1:
return self._gridconvvalue(res)
def grid_columnconfigure(self, index, cnf={}, **kw):
"""Configure column INDEX of a grid.
Valid resources are minsize (minimum size of the column),
weight (how much does additional space propagate to this column)
and pad (how much space to let additionally)."""
return self._grid_configure('columnconfigure', index, cnf, kw)
columnconfigure = grid_columnconfigure
def grid_location(self, x, y):
"""Return a tuple of column and row which identify the cell
at which the pixel at position X and Y inside the master
widget is located."""
return self._getints(
self.tk.call(
'grid', 'location', self._w, x, y)) or None
def grid_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given, the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'grid', 'propagate', self._w))
else:
self.tk.call('grid', 'propagate', self._w, flag)
def grid_rowconfigure(self, index, cnf={}, **kw):
"""Configure row INDEX of a grid.
Valid resources are minsize (minimum size of the row),
weight (how much does additional space propagate to this row)
and pad (how much space to let additionally)."""
return self._grid_configure('rowconfigure', index, cnf, kw)
rowconfigure = grid_rowconfigure
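    # Illustrative sketch (editor's example): giving a row and column
    # nonzero weight lets that cell absorb extra space when the master
    # widget is resized.
    #   root.grid_columnconfigure(0, weight=1)
    #   root.grid_rowconfigure(0, weight=1, minsize=50)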
def grid_size(self):
"""Return a tuple of the number of column and rows in the grid."""
return self._getints(
self.tk.call('grid', 'size', self._w)) or None
size = grid_size
def grid_slaves(self, row=None, column=None):
"""Return a list of all slaves of this widget
in its packing order."""
args = ()
if row is not None:
args = args + ('-row', row)
if column is not None:
args = args + ('-column', column)
return [self._nametowidget(x) for x in
self.tk.splitlist(self.tk.call(
('grid', 'slaves', self._w) + args))]
# Support for the "event" command, new in Tk 4.2.
# By Case Roole.
def event_add(self, virtual, *sequences):
"""Bind a virtual event VIRTUAL (of the form <<Name>>)
to an event SEQUENCE such that the virtual event is triggered
whenever SEQUENCE occurs."""
args = ('event', 'add', virtual) + sequences
self.tk.call(args)
def event_delete(self, virtual, *sequences):
"""Unbind a virtual event VIRTUAL from SEQUENCE."""
args = ('event', 'delete', virtual) + sequences
self.tk.call(args)
def event_generate(self, sequence, **kw):
"""Generate an event SEQUENCE. Additional
keyword arguments specify parameter of the event
(e.g. x, y, rootx, rooty)."""
args = ('event', 'generate', self._w, sequence)
for k, v in kw.items():
args = args + ('-%s' % k, str(v))
self.tk.call(args)
def event_info(self, virtual=None):
"""Return a list of all virtual events or the information
about the SEQUENCE bound to the virtual event VIRTUAL."""
return self.tk.splitlist(
self.tk.call('event', 'info', virtual))
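    # Illustrative sketch (editor's example): defining, binding and firing
    # a virtual event with the three commands above.
    #   root.event_add('<<Save>>', '<Control-s>')
    #   root.bind('<<Save>>', lambda e: print('saving'))
    #   root.event_generate('<<Save>>')     # trigger it programmatically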
# Image related commands
def image_names(self):
"""Return a list of all existing image names."""
return self.tk.splitlist(self.tk.call('image', 'names'))
def image_types(self):
"""Return a list of all available image types (e.g. photo bitmap)."""
return self.tk.splitlist(self.tk.call('image', 'types'))
class CallWrapper:
"""Internal class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit:
raise
except:
self.widget._report_exception()
class XView:
"""Mix-in class for querying and changing the horizontal position
of a widget's window."""
def xview(self, *args):
"""Query and change the horizontal position of the view."""
res = self.tk.call(self._w, 'xview', *args)
if not args:
return self._getdoubles(res)
def xview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total width of the canvas is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units"
or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
class YView:
"""Mix-in class for querying and changing the vertical position
of a widget's window."""
def yview(self, *args):
"""Query and change the vertical position of the view."""
res = self.tk.call(self._w, 'yview', *args)
if not args:
return self._getdoubles(res)
def yview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total height of the canvas is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured in
"units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
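# Illustrative sketch (editor's example): the XView/YView mix-ins are what
# a Scrollbar hooks into; the scrollbar drives yview, and the widget reports
# its position back through yscrollcommand.
#   text = Text(root)
#   sb = Scrollbar(root, command=text.yview)
#   text.configure(yscrollcommand=sb.set)
#   sb.pack(side=RIGHT, fill=Y); text.pack(side=LEFT, fill=BOTH, expand=True)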
class Wm:
"""Provides functions for the communication with the window manager."""
def wm_aspect(self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None):
"""Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given."""
return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom))
aspect = wm_aspect
def wm_attributes(self, *args):
"""This subcommand returns or sets platform specific attributes
The first form returns a list of the platform specific flags and
their values. The second form returns the value for the specific
option. The third form sets one or more of the values. The values
are as follows:
On Windows, -disabled gets or sets whether the window is in a
disabled state. -toolwindow gets or sets the style of the window
to toolwindow (as defined in the MSDN). -topmost gets or sets
whether this is a topmost window (displays above all other
windows).
On Macintosh, XXXXX
On Unix, there are currently no special attribute values.
"""
args = ('wm', 'attributes', self._w) + args
return self.tk.call(args)
attributes=wm_attributes
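    # Illustrative sketch (editor's example); which flags are accepted is
    # platform-dependent, as described above.
    #   root.attributes('-topmost', True)   # keep window above others
    #   print(root.attributes())            # list current flags and values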
def wm_client(self, name=None):
"""Store NAME in WM_CLIENT_MACHINE property of this widget. Return
current value."""
return self.tk.call('wm', 'client', self._w, name)
client = wm_client
def wm_colormapwindows(self, *wlist):
"""Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
of this widget. This list contains windows whose colormaps differ from their
parents. Return current list of widgets if WLIST is empty."""
if len(wlist) > 1:
wlist = (wlist,) # Tk needs a list of windows here
args = ('wm', 'colormapwindows', self._w) + wlist
if wlist:
self.tk.call(args)
else:
return [self._nametowidget(x)
for x in self.tk.splitlist(self.tk.call(args))]
colormapwindows = wm_colormapwindows
def wm_command(self, value=None):
"""Store VALUE in WM_COMMAND property. It is the command
which shall be used to invoke the application. Return current
command if VALUE is None."""
return self.tk.call('wm', 'command', self._w, value)
command = wm_command
def wm_deiconify(self):
"""Deiconify this widget. If it was never mapped it will not be mapped.
On Windows it will raise this widget and give it the focus."""
return self.tk.call('wm', 'deiconify', self._w)
deiconify = wm_deiconify
def wm_focusmodel(self, model=None):
"""Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None."""
return self.tk.call('wm', 'focusmodel', self._w, model)
focusmodel = wm_focusmodel
def wm_forget(self, window): # new in Tk 8.5
"""The window will be unmapped from the screen and will no longer
be managed by wm. toplevel windows will be treated like frame
windows once they are no longer managed by wm, however, the menu
option configuration will be remembered and the menus will return
once the widget is managed again."""
self.tk.call('wm', 'forget', window)
forget = wm_forget
def wm_frame(self):
"""Return identifier for decorative frame of this widget if present."""
return self.tk.call('wm', 'frame', self._w)
frame = wm_frame
def wm_geometry(self, newGeometry=None):
"""Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
current value if None is given."""
return self.tk.call('wm', 'geometry', self._w, newGeometry)
geometry = wm_geometry
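    # Illustrative sketch (editor's example) of the geometry string format:
    #   root.geometry('400x300+100+50')  # widthxheight+xoffset+yoffset
    #   print(root.geometry())           # query the current geometry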
def wm_grid(self,
baseWidth=None, baseHeight=None,
widthInc=None, heightInc=None):
"""Instruct the window manager that this widget shall only be
resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
number of grid units requested in Tk_GeometryRequest."""
return self._getints(self.tk.call(
'wm', 'grid', self._w,
baseWidth, baseHeight, widthInc, heightInc))
grid = wm_grid
def wm_group(self, pathName=None):
"""Set the group leader widgets for related widgets to PATHNAME. Return
the group leader of this widget if None is given."""
return self.tk.call('wm', 'group', self._w, pathName)
group = wm_group
def wm_iconbitmap(self, bitmap=None, default=None):
"""Set bitmap for the iconified widget to BITMAP. Return
the bitmap if None is given.
Under Windows, the DEFAULT parameter can be used to set the icon
        for the widget and any descendants that don't have an icon set
explicitly. DEFAULT can be the relative path to a .ico file
(example: root.iconbitmap(default='myicon.ico') ). See Tk
documentation for more information."""
if default:
return self.tk.call('wm', 'iconbitmap', self._w, '-default', default)
else:
return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
iconbitmap = wm_iconbitmap
def wm_iconify(self):
"""Display widget as icon."""
return self.tk.call('wm', 'iconify', self._w)
iconify = wm_iconify
def wm_iconmask(self, bitmap=None):
"""Set mask for the icon bitmap of this widget. Return the
mask if None is given."""
return self.tk.call('wm', 'iconmask', self._w, bitmap)
iconmask = wm_iconmask
def wm_iconname(self, newName=None):
"""Set the name of the icon for this widget. Return the name if
None is given."""
return self.tk.call('wm', 'iconname', self._w, newName)
iconname = wm_iconname
def wm_iconphoto(self, default=False, *args): # new in Tk 8.5
"""Sets the titlebar icon for this window based on the named photo
images passed through args. If default is True, this is applied to
all future created toplevels as well.
The data in the images is taken as a snapshot at the time of
invocation. If the images are later changed, this is not reflected
to the titlebar icons. Multiple images are accepted to allow
        different image sizes to be provided. The window manager may scale
provided icons to an appropriate size.
On Windows, the images are packed into a Windows icon structure.
This will override an icon specified to wm_iconbitmap, and vice
versa.
On X, the images are arranged into the _NET_WM_ICON X property,
which most modern window managers support. An icon specified by
wm_iconbitmap may exist simultaneously.
On Macintosh, this currently does nothing."""
if default:
self.tk.call('wm', 'iconphoto', self._w, "-default", *args)
else:
self.tk.call('wm', 'iconphoto', self._w, *args)
iconphoto = wm_iconphoto
def wm_iconposition(self, x=None, y=None):
"""Set the position of the icon of this widget to X and Y. Return
        a tuple of the current values of X and Y if None is given."""
return self._getints(self.tk.call(
'wm', 'iconposition', self._w, x, y))
iconposition = wm_iconposition
def wm_iconwindow(self, pathName=None):
"""Set widget PATHNAME to be displayed instead of icon. Return the current
value if None is given."""
return self.tk.call('wm', 'iconwindow', self._w, pathName)
iconwindow = wm_iconwindow
def wm_manage(self, widget): # new in Tk 8.5
"""The widget specified will become a stand alone top-level window.
The window will be decorated with the window managers title bar,
etc."""
self.tk.call('wm', 'manage', widget)
manage = wm_manage
def wm_maxsize(self, width=None, height=None):
"""Set max WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'maxsize', self._w, width, height))
maxsize = wm_maxsize
def wm_minsize(self, width=None, height=None):
"""Set min WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'minsize', self._w, width, height))
minsize = wm_minsize
def wm_overrideredirect(self, boolean=None):
"""Instruct the window manager to ignore this widget
if BOOLEAN is given with 1. Return the current value if None
is given."""
return self._getboolean(self.tk.call(
'wm', 'overrideredirect', self._w, boolean))
overrideredirect = wm_overrideredirect
def wm_positionfrom(self, who=None):
"""Instruct the window manager that the position of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'positionfrom', self._w, who)
positionfrom = wm_positionfrom
def wm_protocol(self, name=None, func=None):
"""Bind function FUNC to command NAME for this widget.
Return the function bound to NAME if None is given. NAME could be
e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
if callable(func):
command = self._register(func)
else:
command = func
return self.tk.call(
'wm', 'protocol', self._w, name, command)
protocol = wm_protocol
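    # Illustrative sketch (editor's example): intercepting the window
    # manager's close button via the WM_DELETE_WINDOW protocol.
    #   def on_close():
    #       print('close requested; cleaning up')
    #       root.destroy()
    #   root.protocol('WM_DELETE_WINDOW', on_close)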
def wm_resizable(self, width=None, height=None):
"""Instruct the window manager whether this width can be resized
in WIDTH or HEIGHT. Both values are boolean values."""
return self.tk.call('wm', 'resizable', self._w, width, height)
resizable = wm_resizable
def wm_sizefrom(self, who=None):
"""Instruct the window manager that the size of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'sizefrom', self._w, who)
sizefrom = wm_sizefrom
def wm_state(self, newstate=None):
"""Query or set the state of this widget as one of normal, icon,
iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
return self.tk.call('wm', 'state', self._w, newstate)
state = wm_state
def wm_title(self, string=None):
"""Set the title of this widget."""
return self.tk.call('wm', 'title', self._w, string)
title = wm_title
def wm_transient(self, master=None):
"""Instruct the window manager that this widget is transient
with regard to widget MASTER."""
return self.tk.call('wm', 'transient', self._w, master)
transient = wm_transient
def wm_withdraw(self):
"""Withdraw this widget from the screen such that it is unmapped
and forgotten by the window manager. Re-draw it with wm_deiconify."""
return self.tk.call('wm', 'withdraw', self._w)
withdraw = wm_withdraw
class Tk(Misc, Wm):
"""Toplevel widget of Tk which represents mostly the main window
of an application. It has an associated Tcl interpreter."""
_w = '.'
def __init__(self, screenName=None, baseName=None, className='Tk',
useTk=1, sync=0, use=None):
"""Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
be created. BASENAME will be used for the identification of the profile file (see
readprofile).
It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
is the name of the widget class."""
self.master = None
self.children = {}
self._tkloaded = 0
# to avoid recursions in the getattr code in case of failure, we
# ensure that self.tk is always _something_.
self.tk = None
if baseName is None:
import os
baseName = os.path.basename(sys.argv[0])
baseName, ext = os.path.splitext(baseName)
if ext not in ('.py', '.pyc'):
baseName = baseName + ext
interactive = 0
self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
if useTk:
self._loadtk()
if not sys.flags.ignore_environment:
# Issue #16248: Honor the -E flag to avoid code injection.
self.readprofile(baseName, className)
def loadtk(self):
if not self._tkloaded:
self.tk.loadtk()
self._loadtk()
def _loadtk(self):
self._tkloaded = 1
global _default_root
# Version sanity checks
tk_version = self.tk.getvar('tk_version')
if tk_version != _tkinter.TK_VERSION:
raise RuntimeError("tk.h version (%s) doesn't match libtk.a version (%s)"
% (_tkinter.TK_VERSION, tk_version))
# Under unknown circumstances, tcl_version gets coerced to float
tcl_version = str(self.tk.getvar('tcl_version'))
if tcl_version != _tkinter.TCL_VERSION:
raise RuntimeError("tcl.h version (%s) doesn't match libtcl.a version (%s)" \
% (_tkinter.TCL_VERSION, tcl_version))
# Create and register the tkerror and exit commands
        # We need to inline parts of _register here; _register
        # would register differently-named commands.
if self._tclCommands is None:
self._tclCommands = []
self.tk.createcommand('tkerror', _tkerror)
self.tk.createcommand('exit', _exit)
self._tclCommands.append('tkerror')
self._tclCommands.append('exit')
if _support_default_root and not _default_root:
_default_root = self
self.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self):
"""Destroy this and all descendants widgets. This will
end the application of this Tcl interpreter."""
for c in list(self.children.values()): c.destroy()
self.tk.call('destroy', self._w)
Misc.destroy(self)
global _default_root
if _support_default_root and _default_root is self:
_default_root = None
def readprofile(self, baseName, className):
"""Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls exec on the contents of BASENAME.py and
CLASSNAME.py if such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
class_tcl = os.path.join(home, '.%s.tcl' % className)
class_py = os.path.join(home, '.%s.py' % className)
base_tcl = os.path.join(home, '.%s.tcl' % baseName)
base_py = os.path.join(home, '.%s.py' % baseName)
dir = {'self': self}
exec('from tkinter import *', dir)
if os.path.isfile(class_tcl):
self.tk.call('source', class_tcl)
if os.path.isfile(class_py):
exec(open(class_py).read(), dir)
if os.path.isfile(base_tcl):
self.tk.call('source', base_tcl)
if os.path.isfile(base_py):
exec(open(base_py).read(), dir)
def report_callback_exception(self, exc, val, tb):
"""Report callback exception on sys.stderr.
Applications may want to override this internal function, and
should when sys.stderr is None."""
import traceback
print("Exception in Tkinter callback", file=sys.stderr)
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
traceback.print_exception(exc, val, tb)
def __getattr__(self, attr):
"Delegate attribute access to the interpreter object"
return getattr(self.tk, attr)
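# Illustrative sketch (editor's example): the minimal life cycle of a Tk
# application using the class above.
#   from tkinter import Tk, Label
#   root = Tk()
#   Label(root, text='hello').pack()
#   root.mainloop()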
# Ideally, the classes Pack, Place and Grid disappear, the
# pack/place/grid methods are defined on the Widget class, and
# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
# ...), with pack(), place() and grid() being short for
# pack_configure(), place_configure() and grid_columnconfigure(), and
# forget() being short for pack_forget(). As a practical matter, I'm
# afraid that there is too much code out there that may be using the
# Pack, Place or Grid class, so I leave them intact -- but only as
# backwards compatibility features. Also note that those methods that
# take a master as argument (e.g. pack_propagate) have been moved to
# the Misc class (which now incorporates all methods common between
# toplevel and interior widgets). Again, for compatibility, these are
# copied into the Pack, Place or Grid class.
def Tcl(screenName=None, baseName=None, className='Tk', useTk=0):
return Tk(screenName, baseName, className, useTk)
class Pack:
"""Geometry manager Pack.
Base class to use the methods pack_* in every widget."""
def pack_configure(self, cnf={}, **kw):
"""Pack a widget in the parent widget. Use as options:
after=widget - pack it after you have packed widget
anchor=NSEW (or subset) - position widget according to
given direction
before=widget - pack it before you will pack widget
expand=bool - expand widget if parent size grows
fill=NONE or X or Y or BOTH - fill widget if widget grows
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.
"""
self.tk.call(
('pack', 'configure', self._w)
+ self._options(cnf, kw))
pack = configure = config = pack_configure
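    # Illustrative sketch (editor's example; toolbar/body are hypothetical
    # widgets) combining the options documented above.
    #   toolbar.pack(side=TOP, fill=X)
    #   body.pack(side=TOP, fill=BOTH, expand=True, padx=4, pady=4)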
def pack_forget(self):
"""Unmap this widget and do not use it for the packing order."""
self.tk.call('pack', 'forget', self._w)
forget = pack_forget
def pack_info(self):
"""Return information about the packing options
for this widget."""
d = _splitdict(self.tk, self.tk.call('pack', 'info', self._w))
if 'in' in d:
d['in'] = self.nametowidget(d['in'])
return d
info = pack_info
propagate = pack_propagate = Misc.pack_propagate
slaves = pack_slaves = Misc.pack_slaves
class Place:
"""Geometry manager Place.
Base class to use the methods place_* in every widget."""
def place_configure(self, cnf={}, **kw):
"""Place a widget in the parent widget. Use as options:
in=master - master relative to which the widget is placed
in_=master - see 'in' option description
x=amount - locate anchor of this widget at position x of master
y=amount - locate anchor of this widget at position y of master
relx=amount - locate anchor of this widget between 0.0 and 1.0
relative to width of master (1.0 is right edge)
rely=amount - locate anchor of this widget between 0.0 and 1.0
relative to height of master (1.0 is bottom edge)
anchor=NSEW (or subset) - position anchor according to given direction
width=amount - width of this widget in pixel
height=amount - height of this widget in pixel
relwidth=amount - width of this widget between 0.0 and 1.0
relative to width of master (1.0 is the same width
as the master)
relheight=amount - height of this widget between 0.0 and 1.0
relative to height of master (1.0 is the same
height as the master)
bordermode="inside" or "outside" - whether to take border width of
master widget into account
"""
self.tk.call(
('place', 'configure', self._w)
+ self._options(cnf, kw))
place = configure = config = place_configure
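    # Illustrative sketch (editor's example; badge/panel are hypothetical
    # widgets) using relative coordinates.
    #   badge.place(relx=1.0, rely=0.0, anchor=NE)     # pin to top-right
    #   panel.place(relx=0.5, rely=0.5, anchor=CENTER, relwidth=0.8)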
def place_forget(self):
"""Unmap this widget."""
self.tk.call('place', 'forget', self._w)
forget = place_forget
def place_info(self):
"""Return information about the placing options
for this widget."""
d = _splitdict(self.tk, self.tk.call('place', 'info', self._w))
if 'in' in d:
d['in'] = self.nametowidget(d['in'])
return d
info = place_info
slaves = place_slaves = Misc.place_slaves
class Grid:
"""Geometry manager Grid.
Base class to use the methods grid_* in every widget."""
# Thanks to Masazumi Yoshikawa ([email protected])
def grid_configure(self, cnf={}, **kw):
"""Position a widget in the parent widget in a grid. Use as options:
column=number - use cell identified with given column (starting with 0)
columnspan=number - this widget will span several columns
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
row=number - use cell identified with given row (starting with 0)
rowspan=number - this widget will span several rows
        sticky=NSEW - if cell is larger, on which sides this
                      widget should stick to the cell boundary
"""
self.tk.call(
('grid', 'configure', self._w)
+ self._options(cnf, kw))
grid = configure = config = grid_configure
bbox = grid_bbox = Misc.grid_bbox
columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
def grid_forget(self):
"""Unmap this widget."""
self.tk.call('grid', 'forget', self._w)
forget = grid_forget
def grid_remove(self):
"""Unmap this widget but remember the grid options."""
self.tk.call('grid', 'remove', self._w)
def grid_info(self):
"""Return information about the options
for positioning this widget in a grid."""
d = _splitdict(self.tk, self.tk.call('grid', 'info', self._w))
if 'in' in d:
d['in'] = self.nametowidget(d['in'])
return d
info = grid_info
location = grid_location = Misc.grid_location
propagate = grid_propagate = Misc.grid_propagate
rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
size = grid_size = Misc.grid_size
slaves = grid_slaves = Misc.grid_slaves
class BaseWidget(Misc):
"""Internal class."""
def _setup(self, master, cnf):
"""Internal function. Sets up information about children."""
if _support_default_root:
global _default_root
if not master:
if not _default_root:
_default_root = Tk()
master = _default_root
self.master = master
self.tk = master.tk
name = None
if 'name' in cnf:
name = cnf['name']
del cnf['name']
if not name:
name = self.__class__.__name__.lower()
if master._last_child_ids is None:
master._last_child_ids = {}
count = master._last_child_ids.get(name, 0) + 1
master._last_child_ids[name] = count
if count == 1:
name = '!%s' % (name,)
else:
name = '!%s%d' % (name, count)
self._name = name
if master._w=='.':
self._w = '.' + name
else:
self._w = master._w + '.' + name
self.children = {}
if self._name in self.master.children:
self.master.children[self._name].destroy()
self.master.children[self._name] = self
def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
"""Construct a widget with the parent widget MASTER, a name WIDGETNAME
and appropriate options."""
if kw:
cnf = _cnfmerge((cnf, kw))
self.widgetName = widgetName
BaseWidget._setup(self, master, cnf)
if self._tclCommands is None:
self._tclCommands = []
classes = [(k, v) for k, v in cnf.items() if isinstance(k, type)]
for k, v in classes:
del cnf[k]
self.tk.call(
(widgetName, self._w) + extra + self._options(cnf))
for k, v in classes:
k.configure(self, v)
def destroy(self):
"""Destroy this and all descendants widgets."""
for c in list(self.children.values()): c.destroy()
self.tk.call('destroy', self._w)
if self._name in self.master.children:
del self.master.children[self._name]
Misc.destroy(self)
def _do(self, name, args=()):
# XXX Obsolete -- better use self.tk.call directly!
return self.tk.call((self._w, name) + args)
class Widget(BaseWidget, Pack, Place, Grid):
"""Internal class.
Base class for a widget which can be positioned with the geometry managers
Pack, Place or Grid."""
pass
class Toplevel(BaseWidget, Wm):
"""Toplevel widget, e.g. for dialogs."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a toplevel widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, menu, relief, screen, takefocus,
use, visual, width."""
if kw:
cnf = _cnfmerge((cnf, kw))
extra = ()
for wmkey in ['screen', 'class_', 'class', 'visual',
'colormap']:
if wmkey in cnf:
val = cnf[wmkey]
# TBD: a hack needed because some keys
# are not valid as keyword arguments
if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
else: opt = '-'+wmkey
extra = extra + (opt, val)
del cnf[wmkey]
BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
root = self._root()
self.iconname(root.iconname())
self.title(root.title())
self.protocol("WM_DELETE_WINDOW", self.destroy)
class Button(Widget):
"""Button widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a button widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, repeatdelay,
repeatinterval, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
command, compound, default, height,
overrelief, state, width
"""
Widget.__init__(self, master, 'button', cnf, kw)
def flash(self):
"""Flash the button.
This is accomplished by redisplaying
the button several times, alternating between active and
normal colors. At the end of the flash the button is left
in the same normal/active state as when the command was
invoked. This command is ignored if the button's state is
disabled.
"""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled.
"""
return self.tk.call(self._w, 'invoke')
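    # Illustrative sketch (editor's example): wiring a command and firing
    # it programmatically with invoke.
    #   btn = Button(root, text='Run', command=lambda: print('clicked'))
    #   btn.pack()
    #   btn.invoke()    # runs the command as if the button were pressed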
class Canvas(Widget, XView, YView):
"""Canvas widget to display graphical elements like lines or text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a canvas widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, closeenough,
confine, cursor, height, highlightbackground, highlightcolor,
highlightthickness, insertbackground, insertborderwidth,
insertofftime, insertontime, insertwidth, offset, relief,
scrollregion, selectbackground, selectborderwidth, selectforeground,
state, takefocus, width, xscrollcommand, xscrollincrement,
yscrollcommand, yscrollincrement."""
Widget.__init__(self, master, 'canvas', cnf, kw)
def addtag(self, *args):
"""Internal function."""
self.tk.call((self._w, 'addtag') + args)
def addtag_above(self, newtag, tagOrId):
"""Add tag NEWTAG to all items above TAGORID."""
self.addtag(newtag, 'above', tagOrId)
def addtag_all(self, newtag):
"""Add tag NEWTAG to all items."""
self.addtag(newtag, 'all')
def addtag_below(self, newtag, tagOrId):
"""Add tag NEWTAG to all items below TAGORID."""
self.addtag(newtag, 'below', tagOrId)
def addtag_closest(self, newtag, x, y, halo=None, start=None):
"""Add tag NEWTAG to item which is closest to pixel at X, Y.
        If several match, take the top-most.
        All items closer than HALO are considered overlapping (all are
        closest). If START is specified, the next below this tag is taken."""
self.addtag(newtag, 'closest', x, y, halo, start)
def addtag_enclosed(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items in the rectangle defined
by X1,Y1,X2,Y2."""
self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
def addtag_overlapping(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
def addtag_withtag(self, newtag, tagOrId):
"""Add tag NEWTAG to all items with TAGORID."""
self.addtag(newtag, 'withtag', tagOrId)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses all items with tags specified as arguments."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tag_unbind(self, tagOrId, sequence, funcid=None):
"""Unbind for all items with TAGORID for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'bind', tagOrId, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
"""Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'bind', tagOrId),
sequence, func, add)
def canvasx(self, screenx, gridspacing=None):
"""Return the canvas x coordinate of pixel position SCREENX rounded
to nearest multiple of GRIDSPACING units."""
return self.tk.getdouble(self.tk.call(
self._w, 'canvasx', screenx, gridspacing))
def canvasy(self, screeny, gridspacing=None):
"""Return the canvas y coordinate of pixel position SCREENY rounded
to nearest multiple of GRIDSPACING units."""
return self.tk.getdouble(self.tk.call(
self._w, 'canvasy', screeny, gridspacing))
def coords(self, *args):
"""Return a list of coordinates for the item given in ARGS."""
# XXX Should use _flatten on args
return [self.tk.getdouble(x) for x in
self.tk.splitlist(
self.tk.call((self._w, 'coords') + args))]
def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
"""Internal function."""
args = _flatten(args)
cnf = args[-1]
if isinstance(cnf, (dict, tuple)):
args = args[:-1]
else:
cnf = {}
return self.tk.getint(self.tk.call(
self._w, 'create', itemType,
*(args + self._options(cnf, kw))))
def create_arc(self, *args, **kw):
"""Create arc shaped region with coordinates x1,y1,x2,y2."""
return self._create('arc', args, kw)
def create_bitmap(self, *args, **kw):
"""Create bitmap with coordinates x1,y1."""
return self._create('bitmap', args, kw)
def create_image(self, *args, **kw):
"""Create image item with coordinates x1,y1."""
return self._create('image', args, kw)
def create_line(self, *args, **kw):
"""Create line with coordinates x1,y1,...,xn,yn."""
return self._create('line', args, kw)
def create_oval(self, *args, **kw):
"""Create oval with coordinates x1,y1,x2,y2."""
return self._create('oval', args, kw)
def create_polygon(self, *args, **kw):
"""Create polygon with coordinates x1,y1,...,xn,yn."""
return self._create('polygon', args, kw)
def create_rectangle(self, *args, **kw):
"""Create rectangle with coordinates x1,y1,x2,y2."""
return self._create('rectangle', args, kw)
def create_text(self, *args, **kw):
"""Create text with coordinates x1,y1."""
return self._create('text', args, kw)
def create_window(self, *args, **kw):
"""Create window with coordinates x1,y1,x2,y2."""
return self._create('window', args, kw)
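    # Illustrative sketch (editor's example): creating items, then using
    # the returned ids with itemconfigure and move (defined below).
    #   canvas = Canvas(root, width=200, height=120, background='white')
    #   rect = canvas.create_rectangle(10, 10, 90, 60, fill='lightblue')
    #   canvas.create_line(10, 100, 190, 100, width=2)
    #   canvas.itemconfigure(rect, outline='navy')
    #   canvas.move(rect, 20, 0)    # shift the rectangle 20 px right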
def dchars(self, *args):
"""Delete characters of text items identified by tag or id in ARGS (possibly
several times) from FIRST to LAST character (including)."""
self.tk.call((self._w, 'dchars') + args)
def delete(self, *args):
"""Delete items identified by all tag or ids contained in ARGS."""
self.tk.call((self._w, 'delete') + args)
def dtag(self, *args):
"""Delete tag or id given as last arguments in ARGS from items
identified by first argument in ARGS."""
self.tk.call((self._w, 'dtag') + args)
def find(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'find') + args)) or ()
def find_above(self, tagOrId):
"""Return items above TAGORID."""
return self.find('above', tagOrId)
def find_all(self):
"""Return all items."""
return self.find('all')
def find_below(self, tagOrId):
"""Return all items below TAGORID."""
return self.find('below', tagOrId)
def find_closest(self, x, y, halo=None, start=None):
"""Return item which is closest to pixel at X, Y.
        If several match, take the top-most.
        All items closer than HALO are considered overlapping (all are
        closest). If START is specified, the next below this tag is taken."""
return self.find('closest', x, y, halo, start)
def find_enclosed(self, x1, y1, x2, y2):
"""Return all items in rectangle defined
by X1,Y1,X2,Y2."""
return self.find('enclosed', x1, y1, x2, y2)
def find_overlapping(self, x1, y1, x2, y2):
"""Return all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
return self.find('overlapping', x1, y1, x2, y2)
def find_withtag(self, tagOrId):
"""Return all items with TAGORID."""
return self.find('withtag', tagOrId)
def focus(self, *args):
"""Set focus to the first item specified in ARGS."""
return self.tk.call((self._w, 'focus') + args)
def gettags(self, *args):
"""Return tags associated with the first item specified in ARGS."""
return self.tk.splitlist(
self.tk.call((self._w, 'gettags') + args))
def icursor(self, *args):
"""Set cursor at position POS in the item identified by TAGORID.
In ARGS TAGORID must be first."""
self.tk.call((self._w, 'icursor') + args)
def index(self, *args):
"""Return position of cursor as integer in item specified in ARGS."""
return self.tk.getint(self.tk.call((self._w, 'index') + args))
def insert(self, *args):
"""Insert TEXT in item TAGORID at position POS. ARGS must
be TAGORID POS TEXT."""
self.tk.call((self._w, 'insert') + args)
def itemcget(self, tagOrId, option):
"""Return the resource value for an OPTION for item TAGORID."""
return self.tk.call(
(self._w, 'itemcget') + (tagOrId, '-'+option))
def itemconfigure(self, tagOrId, cnf=None, **kw):
"""Configure resources of an item TAGORID.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method without arguments.
"""
return self._configure(('itemconfigure', tagOrId), cnf, kw)
itemconfig = itemconfigure
# lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
# so the preferred name for them is tag_lower, tag_raise
# (similar to tag_bind, and similar to the Text widget);
# unfortunately can't delete the old ones yet (maybe in 1.6)
def tag_lower(self, *args):
"""Lower an item TAGORID given in ARGS
(optional below another item)."""
self.tk.call((self._w, 'lower') + args)
lower = tag_lower
def move(self, *args):
"""Move an item TAGORID given in ARGS."""
self.tk.call((self._w, 'move') + args)
def postscript(self, cnf={}, **kw):
"""Print the contents of the canvas to a postscript
file. Valid options: colormap, colormode, file, fontmap,
height, pageanchor, pageheight, pagewidth, pagex, pagey,
rotate, width, x, y."""
return self.tk.call((self._w, 'postscript') +
self._options(cnf, kw))
def tag_raise(self, *args):
"""Raise an item TAGORID given in ARGS
(optional above another item)."""
self.tk.call((self._w, 'raise') + args)
lift = tkraise = tag_raise
def scale(self, *args):
"""Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
self.tk.call((self._w, 'scale') + args)
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y, gain=10):
"""Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
def select_adjust(self, tagOrId, index):
"""Adjust the end of the selection near the cursor of an item TAGORID to index."""
self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
def select_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'select', 'clear')
def select_from(self, tagOrId, index):
"""Set the fixed end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'from', tagOrId, index)
def select_item(self):
"""Return the item which has the selection."""
return self.tk.call(self._w, 'select', 'item') or None
def select_to(self, tagOrId, index):
"""Set the variable end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'to', tagOrId, index)
def type(self, tagOrId):
"""Return the type of the item TAGORID."""
return self.tk.call(self._w, 'type', tagOrId) or None
class Checkbutton(Widget):
"""Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a checkbutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, offvalue, onvalue, padx, pady, relief,
selectcolor, selectimage, state, takefocus, text, textvariable,
underline, variable, width, wraplength."""
Widget.__init__(self, master, 'checkbutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
def toggle(self):
"""Toggle the button."""
self.tk.call(self._w, 'toggle')
class Entry(Widget, XView):
"""Entry widget which allows displaying simple text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct an entry widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, highlightbackground,
highlightcolor, highlightthickness, insertbackground,
insertborderwidth, insertofftime, insertontime, insertwidth,
invalidcommand, invcmd, justify, relief, selectbackground,
selectborderwidth, selectforeground, show, state, takefocus,
textvariable, validate, validatecommand, vcmd, width,
xscrollcommand."""
Widget.__init__(self, master, 'entry', cnf, kw)
def delete(self, first, last=None):
"""Delete text from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Return the text."""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Insert cursor at INDEX."""
self.tk.call(self._w, 'icursor', index)
def index(self, index):
"""Return position of cursor."""
return self.tk.getint(self.tk.call(
self._w, 'index', index))
def insert(self, index, string):
"""Insert STRING at INDEX."""
self.tk.call(self._w, 'insert', index, string)
def scan_mark(self, x):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x)
def scan_dragto(self, x):
"""Adjust the view of the canvas to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x)
def selection_adjust(self, index):
"""Adjust the end of the selection near the cursor to INDEX."""
self.tk.call(self._w, 'selection', 'adjust', index)
select_adjust = selection_adjust
def selection_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'selection', 'clear')
select_clear = selection_clear
def selection_from(self, index):
"""Set the fixed end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'from', index)
select_from = selection_from
def selection_present(self):
"""Return True if there are characters selected in the entry, False
otherwise."""
return self.tk.getboolean(
self.tk.call(self._w, 'selection', 'present'))
select_present = selection_present
def selection_range(self, start, end):
"""Set the selection from START to END (not included)."""
self.tk.call(self._w, 'selection', 'range', start, end)
select_range = selection_range
def selection_to(self, index):
"""Set the variable end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'to', index)
select_to = selection_to
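# Usage sketch (illustrative only; _demo_entry_validation is not part of the
# tkinter API): live key validation on an Entry. '%P' substitutes the
# prospective value of the widget if the edit were allowed.
def _demo_entry_validation():
    root = Tk()
    def only_digits(proposed):
        # accept the edit only if the resulting text is empty or numeric
        return proposed == '' or proposed.isdigit()
    vcmd = (root.register(only_digits), '%P')
    entry = Entry(root, validate='key', validatecommand=vcmd)
    entry.pack()
    root.mainloop()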
class Frame(Widget):
"""Frame widget which may contain other widgets and can have a 3D border."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a frame widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, relief, takefocus, visual, width."""
cnf = _cnfmerge((cnf, kw))
extra = ()
if 'class_' in cnf:
extra = ('-class', cnf['class_'])
del cnf['class_']
elif 'class' in cnf:
extra = ('-class', cnf['class'])
del cnf['class']
Widget.__init__(self, master, 'frame', cnf, {}, extra)
class Label(Widget):
"""Label widget which can display text and bitmaps."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width
"""
Widget.__init__(self, master, 'label', cnf, kw)
class Listbox(Widget, XView, YView):
"""Listbox widget which can display a list of strings."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a listbox widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, height, highlightbackground,
highlightcolor, highlightthickness, relief, selectbackground,
selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
width, xscrollcommand, yscrollcommand, listvariable."""
Widget.__init__(self, master, 'listbox', cnf, kw)
def activate(self, index):
"""Activate item identified by INDEX."""
self.tk.call(self._w, 'activate', index)
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses the item identified by the given index."""
return self._getints(self.tk.call(self._w, 'bbox', index)) or None
def curselection(self):
"""Return the indices of currently selected item."""
return self._getints(self.tk.call(self._w, 'curselection')) or ()
def delete(self, first, last=None):
"""Delete items from FIRST to LAST (included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self, first, last=None):
"""Get list of items from FIRST to LAST (included)."""
if last is not None:
return self.tk.splitlist(self.tk.call(
self._w, 'get', first, last))
else:
return self.tk.call(self._w, 'get', first)
def index(self, index):
"""Return index of item identified with INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return self.tk.getint(i)
def insert(self, index, *elements):
"""Insert ELEMENTS at INDEX."""
self.tk.call((self._w, 'insert', index) + elements)
def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
return self.tk.getint(self.tk.call(
self._w, 'nearest', y))
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the listbox to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def see(self, index):
"""Scroll such that INDEX is visible."""
self.tk.call(self._w, 'see', index)
def selection_anchor(self, index):
"""Set the fixed end oft the selection to INDEX."""
self.tk.call(self._w, 'selection', 'anchor', index)
select_anchor = selection_anchor
def selection_clear(self, first, last=None):
"""Clear the selection from FIRST to LAST (included)."""
self.tk.call(self._w,
'selection', 'clear', first, last)
select_clear = selection_clear
def selection_includes(self, index):
"""Return True if INDEX is part of the selection."""
return self.tk.getboolean(self.tk.call(
self._w, 'selection', 'includes', index))
select_includes = selection_includes
def selection_set(self, first, last=None):
"""Set the selection from FIRST to LAST (included) without
changing the currently selected elements."""
self.tk.call(self._w, 'selection', 'set', first, last)
select_set = selection_set
def size(self):
"""Return the number of elements in the listbox."""
return self.tk.getint(self.tk.call(self._w, 'size'))
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
return self.tk.call(
(self._w, 'itemcget') + (index, '-'+option))
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
return self._configure(('itemconfigure', index), cnf, kw)
itemconfig = itemconfigure
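# Usage sketch (illustrative only; _demo_listbox is not part of the tkinter
# API): populate a Listbox and react to the virtual <<ListboxSelect>> event.
def _demo_listbox():
    root = Tk()
    lb = Listbox(root, selectmode=SINGLE)
    for color in ('red', 'green', 'blue'):
        lb.insert(END, color)
    def on_select(event):
        for i in lb.curselection():
            print('selected:', lb.get(i))
    lb.bind('<<ListboxSelect>>', on_select)
    lb.pack()
    root.mainloop()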
class Menu(Widget):
"""Menu widget which allows displaying menu bars, pull-down menus and pop-up menus."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct menu widget with the parent MASTER.
Valid resource names: activebackground, activeborderwidth,
activeforeground, background, bd, bg, borderwidth, cursor,
disabledforeground, fg, font, foreground, postcommand, relief,
selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
Widget.__init__(self, master, 'menu', cnf, kw)
def tk_popup(self, x, y, entry=""):
"""Post the menu at position X,Y with entry ENTRY."""
self.tk.call('tk_popup', self._w, x, y, entry)
def activate(self, index):
"""Activate entry at INDEX."""
self.tk.call(self._w, 'activate', index)
def add(self, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'add', itemType) +
self._options(cnf, kw))
def add_cascade(self, cnf={}, **kw):
"""Add hierarchical menu item."""
self.add('cascade', cnf or kw)
def add_checkbutton(self, cnf={}, **kw):
"""Add checkbutton menu item."""
self.add('checkbutton', cnf or kw)
def add_command(self, cnf={}, **kw):
"""Add command menu item."""
self.add('command', cnf or kw)
def add_radiobutton(self, cnf={}, **kw):
"""Addd radio menu item."""
self.add('radiobutton', cnf or kw)
def add_separator(self, cnf={}, **kw):
"""Add separator."""
self.add('separator', cnf or kw)
def insert(self, index, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'insert', index, itemType) +
self._options(cnf, kw))
def insert_cascade(self, index, cnf={}, **kw):
"""Add hierarchical menu item at INDEX."""
self.insert(index, 'cascade', cnf or kw)
def insert_checkbutton(self, index, cnf={}, **kw):
"""Add checkbutton menu item at INDEX."""
self.insert(index, 'checkbutton', cnf or kw)
def insert_command(self, index, cnf={}, **kw):
"""Add command menu item at INDEX."""
self.insert(index, 'command', cnf or kw)
def insert_radiobutton(self, index, cnf={}, **kw):
"""Addd radio menu item at INDEX."""
self.insert(index, 'radiobutton', cnf or kw)
def insert_separator(self, index, cnf={}, **kw):
"""Add separator at INDEX."""
self.insert(index, 'separator', cnf or kw)
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
num_index1, num_index2 = self.index(index1), self.index(index2)
if (num_index1 is None) or (num_index2 is None):
num_index1, num_index2 = 0, -1
for i in range(num_index1, num_index2 + 1):
if 'command' in self.entryconfig(i):
c = str(self.entrycget(i, 'command'))
if c:
self.deletecommand(c)
self.tk.call(self._w, 'delete', index1, index2)
def entrycget(self, index, option):
"""Return the resource value of a menu item for OPTION at INDEX."""
return self.tk.call(self._w, 'entrycget', index, '-' + option)
def entryconfigure(self, index, cnf=None, **kw):
"""Configure a menu item at INDEX."""
return self._configure(('entryconfigure', index), cnf, kw)
entryconfig = entryconfigure
def index(self, index):
"""Return the index of a menu item identified by INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return self.tk.getint(i)
def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index)
def post(self, x, y):
"""Display a menu at position X,Y."""
self.tk.call(self._w, 'post', x, y)
def type(self, index):
"""Return the type of the menu item at INDEX."""
return self.tk.call(self._w, 'type', index)
def unpost(self):
"""Unmap a menu."""
self.tk.call(self._w, 'unpost')
def xposition(self, index): # new in Tk 8.5
"""Return the x-position of the leftmost pixel of the menu item
at INDEX."""
return self.tk.getint(self.tk.call(self._w, 'xposition', index))
def yposition(self, index):
"""Return the y-position of the topmost pixel of the menu item at INDEX."""
return self.tk.getint(self.tk.call(
self._w, 'yposition', index))
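# Usage sketch (illustrative only; _demo_menu is not part of the tkinter
# API): a menubar with one cascading File menu attached to the toplevel.
def _demo_menu():
    root = Tk()
    menubar = Menu(root)
    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label='Open', command=lambda: print('open chosen'))
    filemenu.add_separator()
    filemenu.add_command(label='Quit', command=root.destroy)
    menubar.add_cascade(label='File', menu=filemenu)
    root.config(menu=menubar)
    root.mainloop()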
class Menubutton(Widget):
"""Menubutton widget, obsolete since Tk8.0."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'menubutton', cnf, kw)
class Message(Widget):
"""Message widget to display multiline text. Obsolete since Label does it too."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'message', cnf, kw)
class Radiobutton(Widget):
"""Radiobutton widget which shows only one of several buttons in on-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a radiobutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
state, takefocus, text, textvariable, underline, value, variable,
width, wraplength."""
Widget.__init__(self, master, 'radiobutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
class Scale(Widget):
"""Scale widget which can display a numerical scale."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scale widget with the parent MASTER.
Valid resource names: activebackground, background, bigincrement, bd,
bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
highlightbackground, highlightcolor, highlightthickness, label,
length, orient, relief, repeatdelay, repeatinterval, resolution,
showvalue, sliderlength, sliderrelief, state, takefocus,
tickinterval, to, troughcolor, variable, width."""
Widget.__init__(self, master, 'scale', cnf, kw)
def get(self):
"""Get the current value as integer or float."""
value = self.tk.call(self._w, 'get')
try:
return self.tk.getint(value)
except (ValueError, TypeError, TclError):
return self.tk.getdouble(value)
def set(self, value):
"""Set the value to VALUE."""
self.tk.call(self._w, 'set', value)
def coords(self, value=None):
"""Return a tuple (X,Y) of the point along the centerline of the
trough that corresponds to VALUE or the current value if None is
given."""
return self._getints(self.tk.call(self._w, 'coords', value))
def identify(self, x, y):
"""Return where the point X,Y lies. Valid return values are "slider",
"though1" and "though2"."""
return self.tk.call(self._w, 'identify', x, y)
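# Usage sketch (illustrative only; _demo_scale is not part of the tkinter
# API): a horizontal Scale bound to a DoubleVar; the command callback
# receives the new value as a string every time the slider moves.
def _demo_scale():
    root = Tk()
    value = DoubleVar(root)
    scale = Scale(root, from_=0, to=100, orient=HORIZONTAL,
                  variable=value, command=lambda v: print('value:', v))
    scale.pack()
    root.mainloop()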
class Scrollbar(Widget):
"""Scrollbar widget which displays a slider at a certain position."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scrollbar widget with the parent MASTER.
Valid resource names: activebackground, activerelief,
background, bd, bg, borderwidth, command, cursor,
elementborderwidth, highlightbackground,
highlightcolor, highlightthickness, jump, orient,
relief, repeatdelay, repeatinterval, takefocus,
troughcolor, width."""
Widget.__init__(self, master, 'scrollbar', cnf, kw)
def activate(self, index=None):
"""Marks the element indicated by index as active.
The only index values understood by this method are "arrow1",
"slider", or "arrow2". If any other value is specified then no
element of the scrollbar will be active. If index is not specified,
the method returns the name of the element that is currently active,
or None if no element is active."""
return self.tk.call(self._w, 'activate', index) or None
def delta(self, deltax, deltay):
"""Return the fractional change of the scrollbar setting if it
would be moved by DELTAX or DELTAY pixels."""
return self.tk.getdouble(
self.tk.call(self._w, 'delta', deltax, deltay))
def fraction(self, x, y):
"""Return the fractional value which corresponds to a slider
position of X,Y."""
return self.tk.getdouble(self.tk.call(self._w, 'fraction', x, y))
def identify(self, x, y):
"""Return the element under position X,Y as one of
"arrow1","slider","arrow2" or ""."""
return self.tk.call(self._w, 'identify', x, y)
def get(self):
"""Return the current fractional values (upper and lower end)
of the slider position."""
return self._getdoubles(self.tk.call(self._w, 'get'))
def set(self, first, last):
"""Set the fractional values of the slider position (upper and
lower ends as value between 0 and 1)."""
self.tk.call(self._w, 'set', first, last)
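# Usage sketch (illustrative only; _demo_scrollbar is not part of the
# tkinter API): the standard two-way hookup between a Scrollbar and a
# scrollable widget -- the widget reports via yscrollcommand, the
# scrollbar drives the widget via its command option.
def _demo_scrollbar():
    root = Tk()
    sb = Scrollbar(root, orient=VERTICAL)
    lb = Listbox(root, yscrollcommand=sb.set)
    sb.config(command=lb.yview)
    for i in range(100):
        lb.insert(END, 'line %d' % i)
    lb.pack(side=LEFT, fill=BOTH, expand=1)
    sb.pack(side=RIGHT, fill=Y)
    root.mainloop()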
class Text(Widget, XView, YView):
"""Text widget which can display text in various forms."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a text widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor,
exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, padx, pady,
relief, selectbackground,
selectborderwidth, selectforeground,
setgrid, takefocus,
xscrollcommand, yscrollcommand,
WIDGET-SPECIFIC OPTIONS
autoseparators, height, maxundo,
spacing1, spacing2, spacing3,
state, tabs, undo, width, wrap,
"""
Widget.__init__(self, master, 'text', cnf, kw)
def bbox(self, index):
"""Return a tuple of (x,y,width,height) which gives the bounding
box of the visible part of the character at the given index."""
return self._getints(
self.tk.call(self._w, 'bbox', index)) or None
def compare(self, index1, op, index2):
"""Return whether between index INDEX1 and index INDEX2 the
relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
return self.tk.getboolean(self.tk.call(
self._w, 'compare', index1, op, index2))
def count(self, index1, index2, *args): # new in Tk 8.5
"""Counts the number of relevant things between the two indices.
If index1 is after index2, the result will be a negative number
(and this holds for each of the possible options).
The actual items which are counted depends on the options given by
args. The result is a list of integers, one for the result of each
counting option given. Valid counting options are "chars",
"displaychars", "displayindices", "displaylines", "indices",
"lines", "xpixels" and "ypixels". There is an additional possible
option "update", which if given then all subsequent options ensure
that any possible out of date information is recalculated."""
args = ['-%s' % arg for arg in args if not arg.startswith('-')]
args += [index1, index2]
res = self.tk.call(self._w, 'count', *args) or None
if res is not None and len(args) <= 3:
return (res, )
else:
return res
def debug(self, boolean=None):
"""Turn on the internal consistency checks of the B-Tree inside the text
widget according to BOOLEAN."""
if boolean is None:
return self.tk.getboolean(self.tk.call(self._w, 'debug'))
self.tk.call(self._w, 'debug', boolean)
def delete(self, index1, index2=None):
"""Delete the characters between INDEX1 and INDEX2 (not included)."""
self.tk.call(self._w, 'delete', index1, index2)
def dlineinfo(self, index):
"""Return tuple (x,y,width,height,baseline) giving the bounding box
and baseline position of the visible part of the line containing
the character at INDEX."""
return self._getints(self.tk.call(self._w, 'dlineinfo', index))
def dump(self, index1, index2=None, command=None, **kw):
"""Return the contents of the widget between index1 and index2.
        The type of contents returned is filtered based on the keyword
parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are
given and true, then the corresponding items are returned. The result
is a list of triples of the form (key, value, index). If none of the
keywords are true then 'all' is used by default.
If the 'command' argument is given, it is called once for each element
of the list of triples, with the values of each triple serving as the
arguments to the function. In this case the list is not returned."""
args = []
func_name = None
result = None
if not command:
# Never call the dump command without the -command flag, since the
# output could involve Tcl quoting and would be a pain to parse
# right. Instead just set the command to build a list of triples
# as if we had done the parsing.
result = []
def append_triple(key, value, index, result=result):
result.append((key, value, index))
command = append_triple
try:
if not isinstance(command, str):
func_name = command = self._register(command)
args += ["-command", command]
for key in kw:
if kw[key]: args.append("-" + key)
args.append(index1)
if index2:
args.append(index2)
self.tk.call(self._w, "dump", *args)
return result
finally:
if func_name:
self.deletecommand(func_name)
## new in tk8.4
def edit(self, *args):
"""Internal method
This method controls the undo mechanism and
the modified flag. The exact behavior of the
command depends on the option argument that
follows the edit argument. The following forms
of the command are currently supported:
edit_modified, edit_redo, edit_reset, edit_separator
and edit_undo
"""
return self.tk.call(self._w, 'edit', *args)
def edit_modified(self, arg=None):
"""Get or Set the modified flag
If arg is not specified, returns the modified
flag of the widget. The insert, delete, edit undo and
edit redo commands or the user can set or clear the
modified flag. If boolean is specified, sets the
modified flag of the widget to arg.
"""
return self.edit("modified", arg)
def edit_redo(self):
"""Redo the last undone edit
When the undo option is true, reapplies the last
undone edits provided no other edits were done since
then. Generates an error when the redo stack is empty.
Does nothing when the undo option is false.
"""
return self.edit("redo")
def edit_reset(self):
"""Clears the undo and redo stacks
"""
return self.edit("reset")
def edit_separator(self):
"""Inserts a separator (boundary) on the undo stack.
Does nothing when the undo option is false
"""
return self.edit("separator")
def edit_undo(self):
"""Undoes the last edit action
If the undo option is true. An edit action is defined
as all the insert and delete commands that are recorded
on the undo stack in between two separators. Generates
an error when the undo stack is empty. Does nothing
when the undo option is false
"""
return self.edit("undo")
def get(self, index1, index2=None):
"""Return the text from INDEX1 to INDEX2 (not included)."""
return self.tk.call(self._w, 'get', index1, index2)
# (Image commands are new in 8.0)
def image_cget(self, index, option):
"""Return the value of OPTION of an embedded image at INDEX."""
if option[:1] != "-":
option = "-" + option
if option[-1:] == "_":
option = option[:-1]
return self.tk.call(self._w, "image", "cget", index, option)
def image_configure(self, index, cnf=None, **kw):
"""Configure an embedded image at INDEX."""
return self._configure(('image', 'configure', index), cnf, kw)
def image_create(self, index, cnf={}, **kw):
"""Create an embedded image at INDEX."""
return self.tk.call(
self._w, "image", "create", index,
*self._options(cnf, kw))
def image_names(self):
"""Return all names of embedded images in this widget."""
return self.tk.call(self._w, "image", "names")
def index(self, index):
"""Return the index in the form line.char for INDEX."""
return str(self.tk.call(self._w, 'index', index))
def insert(self, index, chars, *args):
"""Insert CHARS before the characters at INDEX. An additional
tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
self.tk.call((self._w, 'insert', index, chars) + args)
def mark_gravity(self, markName, direction=None):
"""Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
Return the current value if None is given for DIRECTION."""
return self.tk.call(
(self._w, 'mark', 'gravity', markName, direction))
def mark_names(self):
"""Return all mark names."""
return self.tk.splitlist(self.tk.call(
self._w, 'mark', 'names'))
def mark_set(self, markName, index):
"""Set mark MARKNAME before the character at INDEX."""
self.tk.call(self._w, 'mark', 'set', markName, index)
def mark_unset(self, *markNames):
"""Delete all marks in MARKNAMES."""
self.tk.call((self._w, 'mark', 'unset') + markNames)
def mark_next(self, index):
"""Return the name of the next mark after INDEX."""
return self.tk.call(self._w, 'mark', 'next', index) or None
def mark_previous(self, index):
"""Return the name of the previous mark before INDEX."""
return self.tk.call(self._w, 'mark', 'previous', index) or None
def peer_create(self, newPathName, cnf={}, **kw): # new in Tk 8.5
"""Creates a peer text widget with the given newPathName, and any
optional standard configuration options. By default the peer will
have the same start and end line as the parent widget, but
these can be overridden with the standard configuration options."""
self.tk.call(self._w, 'peer', 'create', newPathName,
*self._options(cnf, kw))
def peer_names(self): # new in Tk 8.5
"""Returns a list of peers of this widget (this does not include
the widget itself)."""
return self.tk.splitlist(self.tk.call(self._w, 'peer', 'names'))
def replace(self, index1, index2, chars, *args): # new in Tk 8.5
"""Replaces the range of characters between index1 and index2 with
the given characters and tags specified by args.
See the method insert for some more information about args, and the
method delete for information about the indices."""
self.tk.call(self._w, 'replace', index1, index2, chars, *args)
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the text to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def search(self, pattern, index, stopindex=None,
forwards=None, backwards=None, exact=None,
regexp=None, nocase=None, count=None, elide=None):
"""Search PATTERN beginning from INDEX until STOPINDEX.
Return the index of the first character of a match or an
empty string."""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
def see(self, index):
"""Scroll such that the character at INDEX is visible."""
self.tk.call(self._w, 'see', index)
def tag_add(self, tagName, index1, *args):
"""Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
Additional pairs of indices may follow in ARGS."""
self.tk.call(
(self._w, 'tag', 'add', tagName, index1) + args)
def tag_unbind(self, tagName, sequence, funcid=None):
"""Unbind for all characters with TAGNAME for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagName, sequence, func, add=None):
"""Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'tag', 'bind', tagName),
sequence, func, add)
def tag_cget(self, tagName, option):
"""Return the value of OPTION for tag TAGNAME."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'tag', 'cget', tagName, option)
def tag_configure(self, tagName, cnf=None, **kw):
"""Configure a tag TAGNAME."""
return self._configure(('tag', 'configure', tagName), cnf, kw)
tag_config = tag_configure
def tag_delete(self, *tagNames):
"""Delete all tags in TAGNAMES."""
self.tk.call((self._w, 'tag', 'delete') + tagNames)
def tag_lower(self, tagName, belowThis=None):
"""Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS."""
self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
def tag_names(self, index=None):
"""Return a list of all tag names."""
return self.tk.splitlist(
self.tk.call(self._w, 'tag', 'names', index))
def tag_nextrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched forward from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'nextrange', tagName, index1, index2))
def tag_prevrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched backwards from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'prevrange', tagName, index1, index2))
def tag_raise(self, tagName, aboveThis=None):
"""Change the priority of tag TAGNAME such that it is higher
than the priority of ABOVETHIS."""
self.tk.call(
self._w, 'tag', 'raise', tagName, aboveThis)
def tag_ranges(self, tagName):
"""Return a list of ranges of text which have tag TAGNAME."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'ranges', tagName))
def tag_remove(self, tagName, index1, index2=None):
"""Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
self.tk.call(
self._w, 'tag', 'remove', tagName, index1, index2)
def window_cget(self, index, option):
"""Return the value of OPTION of an embedded window at INDEX."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'window', 'cget', index, option)
def window_configure(self, index, cnf=None, **kw):
"""Configure an embedded window at INDEX."""
return self._configure(('window', 'configure', index), cnf, kw)
window_config = window_configure
def window_create(self, index, cnf={}, **kw):
"""Create a window at INDEX."""
self.tk.call(
(self._w, 'window', 'create', index)
+ self._options(cnf, kw))
def window_names(self):
"""Return all names of embedded windows in this widget."""
return self.tk.splitlist(
self.tk.call(self._w, 'window', 'names'))
def yview_pickplace(self, *what):
"""Obsolete function, use see."""
self.tk.call((self._w, 'yview', '-pickplace') + what)
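# Usage sketch (illustrative only; _demo_text_tags is not part of the
# tkinter API): tags attach styling and event bindings to ranges of Text
# content.
def _demo_text_tags():
    root = Tk()
    text = Text(root, width=40, height=5)
    text.pack()
    text.insert('1.0', 'Click the highlighted word.\n')
    text.tag_add('hot', '1.10', '1.21')   # covers "highlighted"
    text.tag_configure('hot', foreground='blue', underline=1)
    text.tag_bind('hot', '<Button-1>', lambda event: print('word clicked'))
    root.mainloop()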
class _setit:
"""Internal class. It wraps the command in the widget OptionMenu."""
def __init__(self, var, value, callback=None):
self.__value = value
self.__var = var
self.__callback = callback
def __call__(self, *args):
self.__var.set(self.__value)
if self.__callback:
self.__callback(self.__value, *args)
class OptionMenu(Menubutton):
"""OptionMenu which allows the user to select a value from a menu."""
def __init__(self, master, variable, value, *values, **kwargs):
"""Construct an optionmenu widget with the parent MASTER, with
the resource textvariable set to VARIABLE, the initially selected
value VALUE, the other menu values VALUES and an additional
keyword argument command."""
kw = {"borderwidth": 2, "textvariable": variable,
"indicatoron": 1, "relief": RAISED, "anchor": "c",
"highlightthickness": 2}
Widget.__init__(self, master, "menubutton", kw)
self.widgetName = 'tk_optionMenu'
menu = self.__menu = Menu(self, name="menu", tearoff=0)
self.menuname = menu._w
# 'command' is the only supported keyword
callback = kwargs.get('command')
if 'command' in kwargs:
del kwargs['command']
if kwargs:
            raise TclError('unknown option -' + next(iter(kwargs)))
menu.add_command(label=value,
command=_setit(variable, value, callback))
for v in values:
menu.add_command(label=v,
command=_setit(variable, v, callback))
self["menu"] = menu
def __getitem__(self, name):
if name == 'menu':
return self.__menu
return Widget.__getitem__(self, name)
def destroy(self):
"""Destroy this widget and the associated menu."""
Menubutton.destroy(self)
self.__menu = None
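# Usage sketch (illustrative only; _demo_optionmenu is not part of the
# tkinter API): an OptionMenu tracking a StringVar; the command callback
# receives the chosen value.
def _demo_optionmenu():
    root = Tk()
    choice = StringVar(root)
    om = OptionMenu(root, choice, 'red', 'green', 'blue',
                    command=lambda picked: print('picked', picked))
    om.pack()
    root.mainloop()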
class Image:
"""Base class for images."""
_last_id = 0
def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
self.name = None
if not master:
master = _default_root
if not master:
raise RuntimeError('Too early to create image')
self.tk = getattr(master, 'tk', master)
if not name:
Image._last_id += 1
name = "pyimage%r" % (Image._last_id,) # tk itself would use image<x>
if kw and cnf: cnf = _cnfmerge((cnf, kw))
elif kw: cnf = kw
options = ()
for k, v in cnf.items():
if callable(v):
v = self._register(v)
options = options + ('-'+k, v)
self.tk.call(('image', 'create', imgtype, name,) + options)
self.name = name
def __str__(self): return self.name
def __del__(self):
if self.name:
try:
self.tk.call('image', 'delete', self.name)
except TclError:
# May happen if the root was destroyed
pass
def __setitem__(self, key, value):
self.tk.call(self.name, 'configure', '-'+key, value)
def __getitem__(self, key):
return self.tk.call(self.name, 'configure', '-'+key)
def configure(self, **kw):
"""Configure the image."""
res = ()
for k, v in _cnfmerge(kw).items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if callable(v):
v = self._register(v)
res = res + ('-'+k, v)
self.tk.call((self.name, 'config') + res)
config = configure
def height(self):
"""Return the height of the image."""
return self.tk.getint(
self.tk.call('image', 'height', self.name))
def type(self):
"""Return the type of the image, e.g. "photo" or "bitmap"."""
return self.tk.call('image', 'type', self.name)
def width(self):
"""Return the width of the image."""
return self.tk.getint(
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
"""Widget which can display images in PGM, PPM, GIF, PNG format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
Valid resource names: data, format, file, gamma, height, palette,
width."""
Image.__init__(self, 'photo', name, cnf, master, **kw)
def blank(self):
"""Display a transparent image."""
self.tk.call(self.name, 'blank')
def cget(self, option):
"""Return the value of OPTION."""
return self.tk.call(self.name, 'cget', '-' + option)
# XXX config
def __getitem__(self, key):
return self.tk.call(self.name, 'cget', '-' + key)
# XXX copy -from, -to, ...?
def copy(self):
"""Return a new PhotoImage with the same image as this widget."""
destImage = PhotoImage(master=self.tk)
self.tk.call(destImage, 'copy', self.name)
return destImage
def zoom(self, x, y=''):
"""Return a new PhotoImage with the same image as this widget
but zoom it with a factor of x in the X direction and y in the Y
direction. If y is not given, the default value is the same as x.
"""
destImage = PhotoImage(master=self.tk)
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
return destImage
def subsample(self, x, y=''):
"""Return a new PhotoImage based on the same image as this widget
but use only every Xth or Yth pixel. If y is not given, the
default value is the same as x.
"""
destImage = PhotoImage(master=self.tk)
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
return destImage
def get(self, x, y):
"""Return the color (red, green, blue) of the pixel at X,Y."""
return self.tk.call(self.name, 'get', x, y)
def put(self, data, to=None):
"""Put row formatted colors to image starting from
position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
args = (self.name, 'put', data)
if to:
if to[0] == '-to':
to = to[1:]
args = args + ('-to',) + tuple(to)
self.tk.call(args)
# XXX read
def write(self, filename, format=None, from_coords=None):
"""Write image to file FILENAME in FORMAT starting from
position FROM_COORDS."""
args = (self.name, 'write', filename)
if format:
args = args + ('-format', format)
if from_coords:
args = args + ('-from',) + tuple(from_coords)
self.tk.call(args)
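# Usage sketch (illustrative only; _demo_photoimage is not part of the
# tkinter API): build a tiny PhotoImage pixel by pixel with put() and read
# one pixel back with get().
def _demo_photoimage():
    root = Tk()
    img = PhotoImage(master=root, width=2, height=2)
    img.put('{red green} {blue yellow}', to=(0, 0))
    print('top-left pixel:', img.get(0, 0))
    Label(root, image=img).pack()
    root.mainloop()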
class BitmapImage(Image):
"""Widget which can display images in XBM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
Valid resource names: background, data, file, foreground, maskdata, maskfile."""
Image.__init__(self, 'bitmap', name, cnf, master, **kw)
def image_names():
return _default_root.tk.splitlist(_default_root.tk.call('image', 'names'))
def image_types():
return _default_root.tk.splitlist(_default_root.tk.call('image', 'types'))
class Spinbox(Widget, XView):
"""spinbox widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a spinbox widget with the parent MASTER.
STANDARD OPTIONS
activebackground, background, borderwidth,
cursor, exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, justify, relief,
repeatdelay, repeatinterval,
            selectbackground, selectborderwidth,
            selectforeground, takefocus, textvariable,
            xscrollcommand.
WIDGET-SPECIFIC OPTIONS
buttonbackground, buttoncursor,
buttondownrelief, buttonuprelief,
command, disabledbackground,
disabledforeground, format, from,
invalidcommand, increment,
readonlybackground, state, to,
            validate, validatecommand, values,
width, wrap,
"""
Widget.__init__(self, master, 'spinbox', cnf, kw)
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a
rectangle which encloses the character given by index.
The first two elements of the list give the x and y
coordinates of the upper-left corner of the screen
area covered by the character (in pixels relative
to the widget) and the last two elements give the
width and height of the character, in pixels. The
bounding box may refer to a region outside the
visible area of the window.
"""
return self._getints(self.tk.call(self._w, 'bbox', index)) or None
def delete(self, first, last=None):
"""Delete one or more elements of the spinbox.
First is the index of the first character to delete,
and last is the index of the character just after
the last one to delete. If last isn't specified it
defaults to first+1, i.e. a single character is
deleted. This command returns an empty string.
"""
return self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Returns the spinbox's string"""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Alter the position of the insertion cursor.
The insertion cursor will be displayed just before
the character given by index. Returns an empty string
"""
return self.tk.call(self._w, 'icursor', index)
def identify(self, x, y):
"""Returns the name of the widget at position x, y
Return value is one of: none, buttondown, buttonup, entry
"""
return self.tk.call(self._w, 'identify', x, y)
def index(self, index):
"""Returns the numerical index corresponding to index
"""
return self.tk.call(self._w, 'index', index)
def insert(self, index, s):
"""Insert string s at index
Returns an empty string.
"""
return self.tk.call(self._w, 'insert', index, s)
def invoke(self, element):
"""Causes the specified element to be invoked
The element could be buttondown or buttonup
triggering the action associated with it.
"""
return self.tk.call(self._w, 'invoke', element)
def scan(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'scan') + args)) or ()
def scan_mark(self, x):
"""Records x and the current view in the spinbox window;
used in conjunction with later scan dragto commands.
Typically this command is associated with a mouse button
press in the widget. It returns an empty string.
"""
return self.scan("mark", x)
def scan_dragto(self, x):
"""Compute the difference between the given x argument
and the x argument to the last scan mark command
It then adjusts the view left or right by 10 times the
difference in x-coordinates. This command is typically
associated with mouse motion events in the widget, to
produce the effect of dragging the spinbox at high speed
through the window. The return value is an empty string.
"""
return self.scan("dragto", x)
def selection(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'selection') + args)) or ()
def selection_adjust(self, index):
"""Locate the end of the selection nearest to the character
given by index,
Then adjust that end of the selection to be at index
(i.e including but not going beyond index). The other
end of the selection is made the anchor point for future
select to commands. If the selection isn't currently in
the spinbox, then a new selection is created to include
the characters between index and the most recent selection
anchor point, inclusive.
"""
return self.selection("adjust", index)
def selection_clear(self):
"""Clear the selection
If the selection isn't in this widget then the
command has no effect.
"""
return self.selection("clear")
def selection_element(self, element=None):
"""Sets or gets the currently selected element.
If a spinbutton element is specified, it will be
displayed depressed.
"""
return self.tk.call(self._w, 'selection', 'element', element)
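# Usage sketch (illustrative only; _demo_spinbox is not part of the tkinter
# API): a numeric Spinbox; trailing-underscore options such as from_ map to
# Tk's -from.
def _demo_spinbox():
    root = Tk()
    sp = Spinbox(root, from_=0, to=10, increment=1, wrap=1,
                 command=lambda: print('stepped to', sp.get()))
    sp.pack()
    root.mainloop()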
###########################################################################
class LabelFrame(Widget):
"""labelframe widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a labelframe widget with the parent MASTER.
STANDARD OPTIONS
borderwidth, cursor, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, padx, pady, relief,
takefocus, text
WIDGET-SPECIFIC OPTIONS
background, class, colormap, container,
height, labelanchor, labelwidget,
visual, width
"""
Widget.__init__(self, master, 'labelframe', cnf, kw)
########################################################################
class PanedWindow(Widget):
"""panedwindow widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a panedwindow widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor, height,
orient, relief, width
WIDGET-SPECIFIC OPTIONS
handlepad, handlesize, opaqueresize,
sashcursor, sashpad, sashrelief,
sashwidth, showhandle,
"""
Widget.__init__(self, master, 'panedwindow', cnf, kw)
def add(self, child, **kw):
"""Add a child widget to the panedwindow in a new pane.
The child argument is the name of the child widget
followed by pairs of arguments that specify how to
manage the windows. The possible options and values
are the ones accepted by the paneconfigure method.
"""
self.tk.call((self._w, 'add', child) + self._options(kw))
def remove(self, child):
"""Remove the pane containing child from the panedwindow
All geometry management options for child will be forgotten.
"""
self.tk.call(self._w, 'forget', child)
forget=remove
def identify(self, x, y):
"""Identify the panedwindow component at point x, y
If the point is over a sash or a sash handle, the result
is a two element list containing the index of the sash or
handle, and a word indicating whether it is over a sash
or a handle, such as {0 sash} or {2 handle}. If the point
is over any other part of the panedwindow, the result is
an empty list.
"""
return self.tk.call(self._w, 'identify', x, y)
def proxy(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'proxy') + args)) or ()
def proxy_coord(self):
"""Return the x and y pair of the most recent proxy location
"""
return self.proxy("coord")
def proxy_forget(self):
"""Remove the proxy from the display.
"""
return self.proxy("forget")
def proxy_place(self, x, y):
"""Place the proxy at the given x and y coordinates.
"""
return self.proxy("place", x, y)
def sash(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'sash') + args)) or ()
def sash_coord(self, index):
"""Return the current x and y pair for the sash given by index.
Index must be an integer between 0 and 1 less than the
number of panes in the panedwindow. The coordinates given are
those of the top left corner of the region containing the sash.
pathName sash dragto index x y This command computes the
difference between the given coordinates and the coordinates
given to the last sash coord command for the given sash. It then
moves that sash the computed difference. The return value is the
empty string.
"""
return self.sash("coord", index)
def sash_mark(self, index):
"""Records x and y for the sash given by index;
Used in conjunction with later dragto commands to move the sash.
"""
return self.sash("mark", index)
def sash_place(self, index, x, y):
"""Place the sash given by index at the given coordinates
"""
return self.sash("place", index, x, y)
def panecget(self, child, option):
"""Query a management option for window.
Option may be any value allowed by the paneconfigure subcommand
"""
return self.tk.call(
(self._w, 'panecget') + (child, '-'+option))
def paneconfigure(self, tagOrId, cnf=None, **kw):
"""Query or modify the management options for window.
If no option is specified, returns a list describing all
of the available options for pathName. If option is
specified with no value, then the command returns a list
describing the one named option (this list will be identical
to the corresponding sublist of the value returned if no
option is specified). If one or more option-value pairs are
specified, then the command modifies the given widget
option(s) to have the given value(s); in this case the
command returns an empty string. The following options
are supported:
after window
Insert the window after the window specified. window
should be the name of a window already managed by pathName.
before window
Insert the window before the window specified. window
should be the name of a window already managed by pathName.
height size
Specify a height for the window. The height will be the
outer dimension of the window including its border, if
any. If size is an empty string, or if -height is not
specified, then the height requested internally by the
window will be used initially; the height may later be
adjusted by the movement of sashes in the panedwindow.
Size may be any value accepted by Tk_GetPixels.
minsize n
Specifies that the size of the window cannot be made
less than n. This constraint only affects the size of
the widget in the paned dimension -- the x dimension
for horizontal panedwindows, the y dimension for
vertical panedwindows. May be any value accepted by
Tk_GetPixels.
padx n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the X-direction. The value may have any of the forms
accepted by Tk_GetPixels.
pady n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the Y-direction. The value may have any of the forms
accepted by Tk_GetPixels.
sticky style
If a window's pane is larger than the requested
dimensions of the window, this option may be used
to position (or stretch) the window within its pane.
Style is a string that contains zero or more of the
characters n, s, e or w. The string can optionally
contains spaces or commas, but they are ignored. Each
letter refers to a side (north, south, east, or west)
that the window will "stick" to. If both n and s
(or e and w) are specified, the window will be
stretched to fill the entire height (or width) of
its cavity.
width size
Specify a width for the window. The width will be
the outer dimension of the window including its
border, if any. If size is an empty string, or
if -width is not specified, then the width requested
internally by the window will be used initially; the
width may later be adjusted by the movement of sashes
in the panedwindow. Size may be any value accepted by
Tk_GetPixels.
"""
if cnf is None and not kw:
return self._getconfigure(self._w, 'paneconfigure', tagOrId)
if isinstance(cnf, str) and not kw:
return self._getconfigure1(
self._w, 'paneconfigure', tagOrId, '-'+cnf)
self.tk.call((self._w, 'paneconfigure', tagOrId) +
self._options(cnf, kw))
paneconfig = paneconfigure
def panes(self):
"""Returns an ordered list of the child panes."""
return self.tk.splitlist(self.tk.call(self._w, 'panes'))
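# Usage sketch (illustrative only; _demo_panedwindow is not part of the
# tkinter API): two panes with per-pane management options (minsize,
# sticky) passed through add(), which accepts paneconfigure options.
def _demo_panedwindow():
    root = Tk()
    pw = PanedWindow(root, orient=HORIZONTAL, showhandle=1)
    left = Label(pw, text='left pane')
    right = Label(pw, text='right pane')
    pw.add(left, minsize=80)
    pw.add(right, sticky='nsew')
    pw.pack(fill=BOTH, expand=1)
    root.mainloop()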
# Test:
def _test():
root = Tk()
text = "This is Tcl/Tk version %s" % TclVersion
text += "\nThis should be a cedilla: \xe7"
label = Label(root, text=text)
label.pack()
test = Button(root, text="Click me!",
command=lambda root=root: root.test.configure(
text="[%s]" % root.test['text']))
test.pack()
root.test = test
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
root.mainloop()
if __name__ == '__main__':
_test()
| prefetchnta/questlab | bin/x64bin/python/37/Lib/tkinter/__init__.py | Python | lgpl-2.1 | 170,982 |
# Copyright (c) 2002 Zooko, blanu
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: tristero.py,v 1.2 2002/12/02 19:58:54 myers_carpenter Exp $"
nodeSchema='http://tristero.sourceforge.net/mnet/MetaTracker#'
commSchema='http://tristero.sourceforge.net/mnet/CommStrategies#'
lowerSchema='http://tristero.sourceforge.net/mnet/LowerStrategy#'
pubkeySchema='http://tristero.sourceforge.net/mnet/Pubkey#'
keyHeaderSchema='http://tristero.sourceforge.net/mnet/PubkeyHeader#'
keyValueSchema='http://tristero.sourceforge.net/mnet/PubkeyValue#'
LITERAL=0
RESOURCE=1
NODE=2
| zooko/egtp_new | egtp/tristero.py | Python | lgpl-2.1 | 687 |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from obspy.core.utcdatetime import UTCDateTime
from matplotlib import gridspec
from pdart.view import stream_from_directory
from obspy import read_inventory
import os
from obspy.core import read
# start_time = UTCDateTime('1971-02-07T00:45:00')
from pdart.diffusion.view_single_seismogram import remove_response
# update 25-08-21
def single_seismogram(title):
# 1969-11-20T22:17:17.7
# onset is 42.4 s Lognonne 2003
    onset = UTCDateTime('1969-11-20T22:17:17.700000Z')
# +42.4
# onset = UTCDateTime('1969-11-20T22:17:700000Z')
start_time = onset - timedelta(minutes=2)
stations = ['S12']
channels = ['MHZ']
end_time = start_time + timedelta(minutes=15)
stream = stream_from_directory(
top_level_dir='/Users/cnunn/lunar_data/PDART_CONTINUOUS_MAIN_TAPES',
start_time=start_time,
stations=stations,
channels=channels,
end_time=end_time)
########
fig = plt.figure(figsize=(15, 4))
gs = gridspec.GridSpec(5, 1, hspace=0.001)
ax0 = plt.subplot(gs[0])
trace_MHZ = stream.select(channel='MHZ')[0]
ax0.plot(times_to_seconds(trace_MHZ.times()), trace_MHZ.data, color='k')
ax0.set_xlim(-2*60, 15*60)
ax0.set_xticks(np.arange(0,15*60,6*50))
ax0.set_xticks(np.arange(-180,15*60,60), minor=True)
ax0.set_title(title, fontsize=20)
ax0.set_yticks(np.arange(480, 560, 20))
# ax0.set_yticks(np.arange(460,580,20), minor=True)
ax0.set_ylim(510-50, 510+50)
ax0.set_ylabel('DU', fontsize=14)
ax0.annotate(xy=(0.01,0.9), text=onset.strftime("%Y-%m-%d %H:%M:%S"),
fontsize=13, horizontalalignment="left", verticalalignment="top",
xycoords="axes fraction")
xticklabels = (ax0.get_xticklabels())
plt.setp(xticklabels, visible=False)
ax0.tick_params(length=6, width=1, which='minor')
ax0.yaxis.set_label_coords(-0.04, 0.5)
ax0.annotate(xy=(1.01,0.5), text='MHZ', fontsize=16,
xycoords="axes fraction", horizontalalignment='left',
verticalalignment='center')
plt.subplots_adjust(left=0.06, right=0.95, top=0.9, bottom=0.12)
plt.savefig('Apollo12_LM_impact_XXXX.png')
plt.show()
# def single_seismogram_remove_response_short(title):
#
# # peaked mode
# inv_name = "/Users/cnunn/lunar_data/IRIS_dataless_seed/XA.1969-1977.xml"
# # onset is 42.4 s Lognonne 2003
# onset = UTCDateTime('1969-11-20TT22:17:17.700000Z')
# start_time = onset - timedelta(minutes=2)
# # XXXX
# station = 'S12'
# channel = 'MHZ'
# pre_filt = [0.1, 0.3,0.7,1]
# # end_time = UTCDateTime('1971:02:07T02:35.25')
#
# end_time = onset + timedelta(minutes=60)
#
# # 1969-11-20T22:17:17.7
#
# stream = remove_response_from_seismogram(inv_name=inv_name,
# start_time=start_time,
# station=station,
# channel=channel,
# pre_filt=pre_filt,
# water_level=None,
# end_time=end_time,
# plot=False)
#
# ########
#
# fig = plt.figure(figsize=(15, 4))
# gs = gridspec.GridSpec(1, 1, hspace=0.001)
#
# ax0 = plt.subplot(gs[0])
#
# trace_MHZ = stream.select(channel='MHZ')[0]
# ax0.plot(times_to_seconds(trace_MHZ.times()), trace_MHZ.data, color='k')
# ax0.set_xlim(-2*60, 5*60)
# # print('short')
# ax0.set_xticks(np.arange(0,6*60,6*50),minor=False)
# ax0.set_xticks(np.arange(-180,6*60,60), minor=True)
# ax0.set_title(title, fontsize=20)
#
# # ax0.set_yticks(np.arange(480, 560, 20))
# # ax0.set_yticks(np.arange(460,580,20), minor=True)
# ax0.set_ylim(-1.1e-8, 1.01e-8)
# ax0.set_ylabel('Displacement [m]', fontsize=14)
# ax0.annotate(xy=(0.01,0.9), text=onset.strftime("%Y-%m-%d %H:%M:%S"),
# fontsize=13, horizontalalignment="left", verticalalignment="top",
# xycoords="axes fraction")
#
# # xticklabels = (ax0.get_xticklabels())
# # plt.setp(xticklabels, visible=False)
#
# ax0.tick_params(length=3, width=1, which='minor')
# ax0.tick_params(length=6, width=1, which='major')
#
# ax0.yaxis.set_label_coords(-0.04, 0.5)
#
# ax0.annotate(xy=(1.01,0.5), text='MHZ', fontsize=16,
# xycoords="axes fraction", horizontalalignment='left',
# verticalalignment='center')
#
# ax0.set_xlabel('Time after impact [s]', fontsize=14)
# ax0.yaxis.set_label_coords(-0.04, 0.5)
#
# plt.subplots_adjust(left=0.06, right=0.95, top=0.9, bottom=0.12)
# plt.savefig('Apollo12_LM_impact_XXXX.png')
# plt.show()
def single_seismogram_remove_response(title,onset,pick=None):
# peaked mode
inv_name = "/Users/cnunn/lunar_data/IRIS_dataless_seed/XA.1969-1977.xml"
start_time = onset - timedelta(minutes=2)
# XXXX
station = 'S12'
channel = 'MHZ'
pre_filt = [0.1, 0.3,0.7,1]
# end_time = UTCDateTime('1971:02:07T02:35.25')
end_time = onset + timedelta(minutes=60)
# 1969-11-20T22:17:17.7
# reset the timing
# make a correction
# find actual time of onset
# print(onset.time)
stream = remove_response_from_seismogram(inv_name=inv_name,
start_time=start_time,
station=station,
channel=channel,
pre_filt=pre_filt,
water_level=None,
end_time=end_time,
plot=False)
########
fig = plt.figure(figsize=(15, 4))
gs = gridspec.GridSpec(1, 1, hspace=0.001)
ax0 = plt.subplot(gs[0])
trace_MHZ = stream.select(channel='MHZ')[0]
ax0.plot(times_to_seconds(trace_MHZ.times()), trace_MHZ.data, color='k')
ax0.set_xlim(-2*60, 60*60)
ax0.set_xticks(np.arange(0,61*60,6*50),minor=False)
ax0.set_xticks(np.arange(-180,61*60,60), minor=True)
# pick_markP = pick - onset
# plt.gca().axvline(x=pick_markP,
# color='r', linewidth=2)
ax0.set_title(title, fontsize=20)
# ax0.set_yticks(np.arange(480, 560, 20))
# ax0.set_yticks(np.arange(460,580,20), minor=True)
ax0.set_ylim(-1.1e-8, 1.01e-8)
ax0.set_ylabel('Displacement [m]', fontsize=14)
ax0.annotate(xy=(0.01,0.9), text=onset.strftime("%Y-%m-%d %H:%M:%S"),
fontsize=13, horizontalalignment="left", verticalalignment="top",
xycoords="axes fraction")
# xticklabels = (ax0.get_xticklabels())
# plt.setp(xticklabels, visible=False)
ax0.tick_params(length=3, width=1, which='minor')
ax0.tick_params(length=6, width=1, which='major')
ax0.yaxis.set_label_coords(-0.04, 0.5)
ax0.annotate(xy=(1.01,0.5), text='MHZ', fontsize=16,
xycoords="axes fraction", horizontalalignment='left',
verticalalignment='center')
ax0.set_xlabel('Time after impact [s]', fontsize=14)
ax0.yaxis.set_label_coords(-0.04, 0.5)
plt.subplots_adjust(left=0.06, right=0.95, top=0.9, bottom=0.13)
plt.savefig('../extra_plots_output/Apollo12_LM_impact_XXXX.png')
plt.show()
# def times_to_minutes(times_in_seconds):
# return ((times_in_seconds / 60) - 2)
# copied from /Users/cnunn/python_packages/pdart/extra_plots/view_response.py
def remove_response_from_seismogram(
inv_name,
start_time,
station,
channel,
pre_filt,
end_time=None,
outfile=None,
output='DISP',
water_level=None,
plot=True):
# read the response file
inv = read_inventory(inv_name)
if end_time is None:
time_interval = timedelta(hours=3)
end_time = start_time + time_interval
# xa.s12..att.1969.324.0.mseed
filename = '%s.%s.*.%s.%s.%03d.0.mseed' % ('xa',station.lower(), channel.lower(),
str(start_time.year), start_time.julday)
filename = os.path.join('/Users/cnunn/lunar_data/PDART_CONTINUOUS_MAIN_TAPES',station.lower(),str(start_time.year),str(start_time.julday),filename)
stream = read(filename)
stream = stream.select(channel=channel)
stream.trim(starttime=start_time, endtime=end_time)
# remove location (ground station)
for tr in stream:
tr.stats.location = ''
# detrend
stream.detrend('linear')
# taper the edges
# if there are gaps in the seismogram - EVERY short trace will be tapered
# this is required to remove the response later
# stream.taper(max_percentage=0.05, type='cosine')
# experiment with tapering? not tapering preserves the overall shape better
# but it may required
# merge the streams
stream.merge()
if stream.count() > 1:
print('Too many streams - exiting')
# find the gaps in the trace
if isinstance(stream[0].data,np.ma.MaskedArray):
mask = np.ma.getmask(stream[0].data)
else:
mask = None
# split the stream, then refill it with zeros on the gaps
stream = stream.split()
stream = stream.merge(fill_value=0)
# for i, n in enumerate(stream[0].times()):
# # print(n)
# stream[0].data[i]=np.sin(2*np.pi*(1/25)*n)
stream.attach_response(inv)
# print('here')
# zero_mean=False - because the trace can be asymmetric - remove the mean ourselves
# do not taper here - it doesn't work well with the masked arrays - often required
# when there are gaps - if necessary taper first
# water level - this probably doesn't have much impact - because we are pre filtering
# stream.remove_response(pre_filt=pre_filt,output="DISP",water_level=30,zero_mean=False,taper=False,plot=True,fig=outfile)
for tr in stream:
remove_response(tr, pre_filt=pre_filt,output=output,water_level=water_level,zero_mean=False,taper=False,plot=plot,fig=outfile)
for tr in stream:
tr.stats.location = 'changed'
if mask is not None:
stream[0].data = np.ma.array(stream[0].data, mask = mask)
print(stream)
return stream
def times_to_seconds(times_in_seconds):
    """Shift trace times so zero lines up with the onset (traces start two minutes early)."""
    return (times_in_seconds - 120)
if __name__ == "__main__":
# single_seismogram(title='Impact of Apollo 12 Lunar Ascent Module')
    onset = UTCDateTime('1969-11-20T22:17:17.700000Z')
# <pick publicID="smi:nunn19/pick/00001/lognonne03/S12/P">
pick = UTCDateTime('1969-11-20T22:17:42.400000Z')
arrival_time = pick - onset
print(arrival_time)
single_seismogram_remove_response(title='Impact of Apollo 12 Lunar Ascent Module',onset=onset,pick=pick)
# single_seismogram_remove_response_short(title='Impact of Apollo 12 Lunar Ascent Module')
| cerinunn/pdart | extra_plots/plot_seismograms.py | Python | lgpl-3.0 | 10,680 |
# -*- coding: utf-8 -*-
__author__ = 'sdukaka'
# Just a quick test of how decorators work
import functools
def log(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('2015-3-25')
now()
def logger(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@logger('DEBUG')
def today():
print('2015-3-25')
today()
print(today.__name__)
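# A contrasting sketch (added example, not from the original file): the same
# decorator without functools.wraps loses the wrapped function's metadata.
def logger_no_wraps(text):
    def decorator(func):
        def wrapper(*args, **kw):
            print('%s %s():' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
@logger_no_wraps('DEBUG')
def tomorrow():
    print('2015-3-26')
tomorrow()
print(tomorrow.__name__)  # prints 'wrapper' instead of 'tomorrow'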
| sdukaka/sdukakaBlog | test_decorator.py | Python | lgpl-3.0 | 664 |
# -*- coding: utf-8 -*-
# util.py
# Copyright (C) 2012 Red Hat, Inc.
#
# Authors:
# Akira TAGOH <[email protected]>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
import gi
import os.path
from collections import OrderedDict
from fontstweak import FontsTweak
from gi.repository import Gtk
def N_(s):
return s
class FontsTweakUtil:
@classmethod
def find_file(self, uifile):
path = os.path.dirname(os.path.realpath(__file__))
f = os.path.join(path, 'data', uifile)
if not os.path.isfile(f):
f = os.path.join(path, '..', 'data', uifile)
if not os.path.isfile(f):
f = os.path.join(FontsTweak.UIPATH, uifile)
return f
@classmethod
def create_builder(self, uifile):
builder = Gtk.Builder()
builder.set_translation_domain(FontsTweak.GETTEXT_PACKAGE)
builder.add_from_file(self.find_file(uifile))
return builder
@classmethod
def translate_text(self, text, lang):
try:
self.translations
except AttributeError:
self.translations = {}
        if lang not in self.translations:
self.translations[lang] = gettext.translation(
domain=FontsTweak.GETTEXT_PACKAGE,
localedir=FontsTweak.LOCALEDIR,
languages=[lang.replace('-', '_')],
fallback=True,
codeset="utf8")
return unicode(self.translations[lang].gettext(text), "utf8")
@classmethod
def get_language_list(self, default):
dict = OrderedDict()
if default == True:
dict[''] = N_('Default')
try:
fd = open(self.find_file('locale-list'), 'r')
        except IOError:
raise RuntimeError, "Unable to open locale-list"
while True:
line = fd.readline()
if not line:
break
            tokens = line.split()
            lang = str(tokens[0]).split('.')[0].replace('_', '-')
            dict[lang] = ' '.join(tokens[3:])
return dict
| jamesni/fonts-tweak-tool | fontstweak/util.py | Python | lgpl-3.0 | 2,700 |
from pymongo import MongoClient
import config
class Database:
def __init__(self, db_name=None):
self.mongodb_client = create_mongodb_client()
self.db = self.create_db(db_name)
self.authenticate_user()
def create_db(self, db_name):
if db_name is None:
return self.mongodb_client[config.get_database_name()]
return self.mongodb_client[db_name]
def authenticate_user(self):
if config.is_database_authentication_enabled():
self.db.authenticate(config.get_database_user(), config.get_database_password())
def insert_document_in_collection(self, doc, collection_name):
collection = self.db[collection_name]
collection.insert_one(doc)
def exist_doc_in_collection(self, search_condition, collection_name):
collection = self.db[collection_name]
query_result = collection.find(search_condition).limit(1)
return doc_found(query_result)
def search_text_with_regex_in_collection(self, regex, field, collection_name):
collection = self.db[collection_name]
return collection.find({field: get_regex_dict(regex)})
def search_text_with_regex_in_collection_mul(self, regex_a, regex_b, field_a, field_b, collection_name):
collection = self.db[collection_name]
return collection.find({'$and': [{field_a: get_regex_dict(regex_a)}, {field_b: get_regex_dict(regex_b)}]})
def search_document_in_collection(self, search_condition, collection_name):
collection = self.db[collection_name]
return collection.find_one(search_condition, {'_id': 0})
def search_documents_in_collection(self, search_condition, collection_name):
collection = self.db[collection_name]
return collection.find(search_condition, {'_id': 0})
def search_documents_and_aggregate(self, search_condition, aggregation, collection_name):
collection = self.db[collection_name]
return list(collection.aggregate([{'$match': search_condition}, {'$project': aggregation}]))
def get_number_of_documents_in_collection(self, collection_name, filter_=None):
collection = self.db[collection_name]
return collection.count(filter_)
def update_document_in_collection(self, filter_, update, collection_name, insert_if_not_exists=False):
collection = self.db[collection_name]
collection.update_one(filter_, {'$set': update}, upsert=insert_if_not_exists)
def update_documents_in_collection(self, docs, find_filter, collection_name):
if len(docs) > 0:
bulk = self.db[collection_name].initialize_ordered_bulk_op()
for doc in docs:
bulk.find({find_filter: doc.get(find_filter)}).upsert().update({'$set': doc})
bulk.execute()
def get_documents_from_collection(self, collection_name):
collection = self.db[collection_name]
return list(collection.find({}, {'_id': 0}))
def get_documents_from_collection_in_range(self, collection_name, skip=0, limit=0):
collection = self.db[collection_name]
return list(collection.find({}, {'_id': 0}).skip(skip).limit(limit))
def delete_document_from_collection(self, query, collection_name):
collection = self.db[collection_name]
collection.delete_one(query)
def close(self):
self.mongodb_client.close()
def drop_collection(self, collection_name):
collection = self.db[collection_name]
collection.drop()
def insert_documents_in_collection(self, documents, collection_name):
collection = self.db[collection_name]
collection.insert_many(documents=documents)
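
    # Hypothetical usage sketch (collection and field names invented; assumes a
    # reachable MongoDB instance configured via config.py):
    #     db = Database()
    #     db.insert_document_in_collection({'cve': 'CVE-2020-0001'}, 'cves')
    #     doc = db.search_document_in_collection({'cve': 'CVE-2020-0001'}, 'cves')
    #     db.close()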
def create_mongodb_client():
return MongoClient(config.get_database_host(), config.get_database_port())
def doc_found(query_result):
found = query_result.count() > 0
query_result.close()
return found
def get_regex_dict(regex):
return {'$regex': regex} | fkie-cad/iva | database.py | Python | lgpl-3.0 | 3,952 |
"""
useful decorators
"""
__author__ = "Philippe Guglielmetti"
__copyright__ = "Copyright 2015, Philippe Guglielmetti"
__credits__ = ["http://include.aorcsik.com/2014/05/28/timeout-decorator/"]
__license__ = "LGPL + MIT"
import multiprocessing
from multiprocessing import TimeoutError
from threading import Timer
import weakref
import threading
import _thread as thread
from multiprocessing.pool import ThreadPool
import logging
import functools
import sys
_gettrace = getattr(sys, 'gettrace', None)
debugger = _gettrace and _gettrace()
logging.info('debugger ' + ('ACTIVE' if debugger else 'INACTIVE'))
# http://wiki.python.org/moin/PythonDecoratorLibrary
def memoize(obj):
"""speed up repeated calls to a function by caching its results in a dict index by params
:see: https://en.wikipedia.org/wiki/Memoization
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
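
# Illustrative usage (function invented for this note): repeated calls with
# equal arguments are served from obj.cache instead of being recomputed.
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#     fib(300)  # each subproblem is evaluated only once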
def debug(func):
# Customize these messages
ENTRY_MESSAGE = 'Entering {}'
EXIT_MESSAGE = 'Exiting {}'
@functools.wraps(func)
def wrapper(*args, **kwds):
logger = logging.getLogger()
logger.info(ENTRY_MESSAGE.format(func.__name__))
level = logger.getEffectiveLevel()
logger.setLevel(logging.DEBUG)
f_result = func(*args, **kwds)
logger.setLevel(level)
logger.info(EXIT_MESSAGE.format(func.__name__))
return f_result
return wrapper
def nodebug(func):
@functools.wraps(func)
def wrapper(*args, **kwds):
logger = logging.getLogger()
level = logger.getEffectiveLevel()
logger.setLevel(logging.INFO)
f_result = func(*args, **kwds)
logger.setLevel(level)
return f_result
return wrapper
# https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
def timeit(method):
import time
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
logging.info('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
# http://include.aorcsik.com/2014/05/28/timeout-decorator/
# BUT read http://eli.thegreenplace.net/2011/08/22/how-not-to-set-a-timeout-on-a-computation-in-python
thread_pool = None
def get_thread_pool():
global thread_pool
if thread_pool is None:
# fix for python <2.7.2
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = weakref.WeakKeyDictionary()
thread_pool = ThreadPool(processes=1)
return thread_pool
def timeout(timeout):
def wrap_function(func):
if not timeout:
return func
@functools.wraps(func)
def __wrapper(*args, **kwargs):
try:
async_result = get_thread_pool().apply_async(func, args=args, kwds=kwargs)
return async_result.get(timeout)
except thread.error:
return func(*args, **kwargs)
return __wrapper
return wrap_function
# https://gist.github.com/goulu/45329ef041a368a663e5
def itimeout(iterable, timeout):
"""timeout for loops
:param iterable: any iterable
:param timeout: float max running time in seconds
:yield: items in iterator until timeout occurs
:raise: multiprocessing.TimeoutError if timeout occured
"""
if False: # handle debugger better one day ...
n = 100 * timeout
for i, x in enumerate(iterable):
yield x
if i > n:
break
else:
timer = Timer(timeout, lambda: None)
timer.start()
for x in iterable:
yield x
if timer.finished.is_set():
raise TimeoutError
# don't forget it, otherwise the thread never finishes...
timer.cancel()
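
# Usage sketch (bounds invented): wrap any iterable and catch the TimeoutError
# that signals the time budget ran out before the loop finished.
#     try:
#         for x in itimeout(range(10**9), timeout=0.1):
#             pass
#     except TimeoutError:
#         pass  # handle the partial result here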
# https://www.artima.com/weblogs/viewpost.jsp?thread=101605
registry = {}
class MultiMethod(object):
def __init__(self, name):
self.name = name
self.typemap = {}
def __call__(self, *args):
types = tuple(arg.__class__ for arg in args) # a generator expression!
function = self.typemap.get(types)
if function is None:
raise TypeError("no match")
return function(*args)
def register(self, types, function):
if types in self.typemap:
raise TypeError("duplicate registration")
self.typemap[types] = function
def multimethod(*types):
"""
allows to overload functions for various parameter types
@multimethod(int, int)
def foo(a, b):
...code for two ints...
    @multimethod(float, float)
def foo(a, b):
...code for two floats...
    @multimethod(str, str)
def foo(a, b):
...code for two strings...
"""
def register(function):
name = function.__name__
mm = registry.get(name)
if mm is None:
mm = registry[name] = MultiMethod(name)
mm.register(types, function)
return mm
return register
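
# Illustrative continuation of the docstring example above: foo(1, 2) dispatches
# to the (int, int) version, foo(1.0, 2.0) to the (float, float) version, and an
# argument signature with no registered match raises TypeError("no match").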
| goulu/Goulib | Goulib/decorators.py | Python | lgpl-3.0 | 5,279 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 18.02.2015
@author: marscher
'''
import os
import numpy as np
from deeptime.clustering import ClusterModel, metrics
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._base.model import Model
from pyemma._base.parallel import NJobsMixIn
from pyemma._ext.sklearn.base import ClusterMixin
from pyemma.coordinates.data._base.transformer import StreamingEstimationTransformer
from pyemma.util.annotators import fix_docs, aliased, alias
from pyemma.util.discrete_trajectories import index_states, sample_indexes_by_state
from pyemma.util.files import mkdir_p
@fix_docs
@aliased
class AbstractClustering(StreamingEstimationTransformer, Model, ClusterMixin, NJobsMixIn, SerializableMixIn):
"""
provides a common interface for cluster algorithms.
Parameters
----------
metric: str, default='euclidean'
metric to pass to c extension
n_jobs: int or None, default=None
How much threads to use during assignment
If None, all available CPUs will be used.
"""
def __init__(self, metric='euclidean', n_jobs=None):
super(AbstractClustering, self).__init__()
from ._ext import rmsd
metrics.register("minRMSD", rmsd)
self.metric = metric
self.clustercenters = None
self._previous_stride = -1
self._dtrajs = []
self._overwrite_dtrajs = False
self._index_states = []
self.n_jobs = n_jobs
__serialize_fields = ('_dtrajs', '_previous_stride', '_index_states', '_overwrite_dtrajs', '_precentered')
__serialize_version = 0
def set_model_params(self, clustercenters):
self.clustercenters = clustercenters
@property
@alias('cluster_centers_') # sk-learn compat.
def clustercenters(self):
""" Array containing the coordinates of the calculated cluster centers. """
return self._clustercenters
@clustercenters.setter
def clustercenters(self, val):
self._clustercenters = np.asarray(val, dtype='float32', order='C')[:] if val is not None else None
self._precentered = False
@property
def overwrite_dtrajs(self):
"""
Should existing dtraj files be overwritten. Set this property to True to overwrite.
"""
return self._overwrite_dtrajs
@overwrite_dtrajs.setter
def overwrite_dtrajs(self, value):
self._overwrite_dtrajs = value
@property
#@alias('labels_') # TODO: for fully sklearn-compat this would have to be a flat array!
def dtrajs(self):
"""Discrete trajectories (assigned data to cluster centers)."""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign(stride=1)
return self._dtrajs # returning what we have saved
@property
def index_clusters(self):
"""Returns trajectory/time indexes for all the clusters
Returns
-------
indexes : list of ndarray( (N_i, 2) )
For each state, all trajectory and time indexes where this cluster occurs.
Each matrix has a number of rows equal to the number of occurrences of the corresponding state,
with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index
within the trajectory.
"""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign()
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self._dtrajs)
return self._index_states
def sample_indexes_by_cluster(self, clusters, nsample, replace=True):
"""Samples trajectory/time indexes according to the given sequence of states.
Parameters
----------
clusters : iterable of integers
It contains the cluster indexes to be sampled
nsample : int
            Number of samples per cluster. If replace = False, the number of returned samples per cluster could be smaller
            if fewer than nsample indexes are available for a cluster.
replace : boolean, optional
Whether the sample is with or without replacement
Returns
-------
indexes : list of ndarray( (N, 2) )
List of the sampled indices by cluster.
Each element is an index array with a number of rows equal to N=len(sequence), with rows consisting of a
tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
"""
        # Check if the catalogue (index_states) has already been built
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self.dtrajs)
return sample_indexes_by_state(self._index_states[clusters], nsample, replace=replace)
def _transform_array(self, X):
"""get closest index of point in :attr:`clustercenters` to x."""
X = np.require(X, dtype=np.float32, requirements='C')
# for performance reasons we pre-center the cluster centers for minRMSD.
if self.metric == 'minRMSD' and not self._precentered:
self._precentered = True
model = ClusterModel(cluster_centers=self.clustercenters, metric=self.metric)
dtraj = model.transform(X)
res = dtraj[:, None] # always return a column vector in this function
return res
def dimension(self):
"""output dimension of clustering algorithm (always 1)."""
return 1
def output_type(self):
return np.int32()
def assign(self, X=None, stride=1):
"""
Assigns the given trajectory or list of trajectories to cluster centers by using the discretization defined
by this clustering method (usually a Voronoi tesselation).
You can assign multiple times with different strides. The last result of assign will be saved and is available
as the attribute :func:`dtrajs`.
Parameters
----------
X : ndarray(T, n) or list of ndarray(T_i, n), optional, default = None
Optional input data to map, where T is the number of time steps and n is the number of dimensions.
When a list is provided they can have differently many time steps, but the number of dimensions need
to be consistent. When X is not provided, the result of assign is identical to get_output(), i.e. the
data used for clustering will be assigned. If X is given, the stride argument is not accepted.
stride : int, optional, default = 1
If set to 1, all frames of the input data will be assigned. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to obtain the discretization at a longer stride.
Note that the stride option used to conduct the clustering is independent of the assign stride.
This argument is only accepted if X is not given.
Returns
-------
Y : ndarray(T, dtype=int) or list of ndarray(T_i, dtype=int)
The discretized trajectory: int-array with the indexes of the assigned clusters, or list of such int-arrays.
If called with a list of trajectories, Y will also be a corresponding list of discrete trajectories
"""
if X is None:
# if the stride did not change and the discrete trajectory is already present,
# just return it
            if self._previous_stride == stride and len(self._dtrajs) > 0:
return self._dtrajs
self._previous_stride = stride
skip = self.skip if hasattr(self, 'skip') else 0
# map to column vectors
mapped = self.get_output(stride=stride, chunk=self.chunksize, skip=skip)
# flatten and save
self._dtrajs = [np.transpose(m)[0] for m in mapped]
# return
return self._dtrajs
else:
if stride != 1:
raise ValueError('assign accepts either X or stride parameters, but not both. If you want to map '+
'only a subset of your data, extract the subset yourself and pass it as X.')
# map to column vector(s)
mapped = self.transform(X)
# flatten
if isinstance(mapped, np.ndarray):
mapped = np.transpose(mapped)[0]
else:
mapped = [np.transpose(m)[0] for m in mapped]
# return
return mapped
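
    # Illustrative usage (estimator construction assumed, mirroring the
    # docstring above):
    #     cl = pyemma.coordinates.cluster_kmeans(data, k=100)
    #     dtrajs = cl.assign(stride=10)  # strided assignment
    #     dtrajs = cl.dtrajs             # cached result of the last assign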
def save_dtrajs(self, trajfiles=None, prefix='',
output_dir='.',
output_format='ascii',
extension='.dtraj'):
"""saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
trajfiles : list of str (optional)
names of input trajectory files, will be used generate output files.
prefix : str
prepend prefix to filenames.
output_dir : str
save files to this directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
            file extension to append (e.g. '.itraj')
"""
if extension[0] != '.':
extension = '.' + extension
# obtain filenames from input (if possible, reader is a featurereader)
if output_format == 'ascii':
from msmtools.dtraj import write_discrete_trajectory as write_dtraj
else:
from msmtools.dtraj import save_discrete_trajectory as write_dtraj
import os.path as path
output_files = []
if trajfiles is not None: # have filenames available?
for f in trajfiles:
p, n = path.split(f) # path and file
basename, _ = path.splitext(n)
if prefix != '':
name = "%s_%s%s" % (prefix, basename, extension)
else:
name = "%s%s" % (basename, extension)
# name = path.join(p, name)
output_files.append(name)
else:
for i in range(len(self.dtrajs)):
if prefix != '':
name = "%s_%i%s" % (prefix, i, extension)
else:
name = str(i) + extension
output_files.append(name)
assert len(self.dtrajs) == len(output_files)
if not os.path.exists(output_dir):
mkdir_p(output_dir)
for filename, dtraj in zip(output_files, self.dtrajs):
dest = path.join(output_dir, filename)
self.logger.debug('writing dtraj to "%s"' % dest)
try:
if path.exists(dest) and not self.overwrite_dtrajs:
raise EnvironmentError('Attempted to write dtraj "%s" which already existed. To automatically'
' overwrite existing files, set source.overwrite_dtrajs=True.' % dest)
write_dtraj(dest, dtraj)
except IOError:
self.logger.exception('Exception during writing dtraj to "%s"' % dest)
| markovmodel/PyEMMA | pyemma/coordinates/clustering/interface.py | Python | lgpl-3.0 | 12,270 |
""" X """
import cPickle as pickle
import os
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class PlotInterface(object):
def plot_timeseries(self, times, values):
pass
def plot_sinwave(self, times, sinewave):
pass
def plot_area_ratio(self, on_period_area, off_period_area):
pass
def plot_periodogram(self, periods, powers, hints, power_threshold, time_threshold):
pass
def plot_acf(self, times, acf):
pass
def plot_acf_validation(self, times, acf, times1, m1, c1, err1, times2, m2, c2, err2, split_idx, peak_idx):
pass
def show(self):
pass
class ImageOutput(PlotInterface):
""" Output the timeseries data and the period estimate as plot in png format """
def __init__(self, jobid, metricname):
ijobid = int(jobid)
top = (ijobid / 1000000)
middle = (ijobid / 1000) % 1000
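        # worked example (added comment): jobid 1234567 -> top 1, middle 234,
        # so the plot is written to "1/234/1234567_<metricname>.png"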
try:
os.makedirs("{}/{}".format(top, middle))
except OSError:
pass
self.outfilename = "{}/{}/{}_{}.png".format(top, middle, jobid, metricname)
self.fig = plt.figure()
self.data_ax = plt.subplot(211, xlabel='Elapsed time (s)', ylabel='Data rate (MB/s)')
self.sine_ax = None
self.verbose = False
def plot_timeseries(self, times, values):
self.data_ax.plot(times, values / 1024.0 / 1024.0, label='Timeseries')
def plot_sinwave(self, times, sinewave):
self.sine_ax = plt.subplot(212, xlabel='Elapsed time (s)', ylabel='Data rate (MB/s)')
self.sine_ax.plot(times, sinewave / 1024.0 / 1024.0, label='Estimate')
def show(self):
self.fig.tight_layout()
self.fig.savefig(self.outfilename, format='png', transparent=True)
class Dumper(object):
def __init__(self, filename='data.dat'):
self.filename = filename
self.data = {}
self.verbose = True
def plot_timeseries(self, times, values):
self.data['timeseries'] = (times, values)
def plot_sinwave(self, times, sinewave):
self.data['sinewave'] = (times, sinewave)
def plot_area_ratio(self, on_period_area, off_period_area):
self.data['area_ratio'] = (on_period_area, off_period_area)
def plot_periodogram(self, periods, powers, hints, power_threshold, time_threshold):
self.data['periodogram'] = (periods, powers, hints, power_threshold, time_threshold)
def plot_acf(self, times, acf):
self.data['acf'] = (times, acf)
def plot_acf_validation(self, times, acf, times1, m1, c1, err1, times2, m2, c2, err2, split_idx, peak_idx):
self.data['acf_validation'] = (times, acf, times1, m1, c1, err1, times2, m2, c2, err2, split_idx, peak_idx)
def show(self):
with open(self.filename, 'wb') as fp:
pickle.dump(self.data, fp)
def load(self):
with open(self.filename, 'rb') as fp:
self.data = pickle.load(fp)
class Plotter(object):
def __init__(self, title="Autoperiod", filename='output.pdf', figsize=(4, 3), verbose=False):
self.title = title
self.filename = filename
self.fig = plt.figure()
self.figsize = figsize
self.verbose = verbose
self.timeseries_ax = plt.subplot2grid((3, 10), (0, 0), colspan=9, xlabel='Times', ylabel='Values')
self.area_ratio_ax = plt.subplot2grid((3, 10), (0, 9), colspan=1, xticks=(1, 2), xticklabels=("on", "off"))
self.area_ratio_ax.get_yaxis().set_visible(False)
self.periodogram_ax = plt.subplot2grid((3, 10), (1, 0), colspan=10, xlabel='Period', ylabel='Power')
self.acf_ax = plt.subplot2grid((3, 10), (2, 0), colspan=10, xlabel='Lag', ylabel='Correlation')
self.time_threshold = None
def plot_timeseries(self, times, values):
self.timeseries_ax.plot(times, values, label='Timeseries')
self.timeseries_ax.legend()
def plot_sinwave(self, times, sinwave):
self.timeseries_ax.plot(times, sinwave, label='Estimated Period')
self.timeseries_ax.legend()
def plot_area_ratio(self, on_period_area, off_period_area):
self.area_ratio_ax.bar(1, on_period_area)
self.area_ratio_ax.bar(2, off_period_area)
self.area_ratio_ax.legend()
def plot_periodogram(self, periods, powers, hints, power_threshold, time_threshold):
self.time_threshold = time_threshold
self.periodogram_ax.plot(periods, powers, label='Periodogram')
self.periodogram_ax.scatter([p for i, p in hints], [powers[i] for i, p in hints], c='red', marker='x', label='Period Hints')
self.periodogram_ax.axhline(power_threshold, color='green', linewidth=1, linestyle='dashed', label='Min Power')
#self.periodogram_ax.axvline(time_threshold, c='purple', linewidth=1, linestyle='dashed', label='Max Period')
self.periodogram_ax.legend()
self.periodogram_ax.set_xlim([0, self.time_threshold])
def plot_acf(self, times, acf):
self.acf_ax.plot(times, acf, '-o', lw=0.5, ms=2, label='Autocorrelation')
if self.time_threshold is not None:
self.acf_ax.set_xlim([0, self.time_threshold])
self.acf_ax.legend()
def plot_acf_validation(self, times, acf, times1, m1, c1, err1, times2, m2, c2, err2, split_idx, peak_idx):
self.acf_ax.plot(times1, c1 + m1 * times1, c='r', label='Slope: {}, Error: {}'.format(m1, err1))
self.acf_ax.plot(times2, c2 + m2 * times2, c='r', label='Slope: {}, Error: {}'.format(m2, err2))
self.acf_ax.scatter(times[split_idx], acf[split_idx], c='y', label='Split point: {}'.format(times[split_idx]))
self.acf_ax.scatter(times[peak_idx], acf[peak_idx], c='g', label='Peak point: {}'.format(times[peak_idx]))
self.acf_ax.legend()
def show(self):
self.fig.tight_layout()
if self.filename:
self.fig.set_size_inches(*self.figsize)
self.fig.savefig(self.filename, format='pdf', facecolor=self.fig.get_facecolor())
def main():
""" X """
d = Dumper('gpfs-fsios-write_bytes_data.dat')
d.load()
p = Plotter()
p.plot_timeseries(*d.data['timeseries'])
p.plot_sinwave(*d.data['sinewave'])
p.plot_area_ratio(*d.data['area_ratio'])
p.plot_periodogram(*d.data['periodogram'])
p.plot_acf(*d.data['acf'])
p.plot_acf_validation(*d.data['acf_validation'])
p.show()
if __name__ == "__main__":
main()
| ubccr/supremm | src/supremm/datadumper.py | Python | lgpl-3.0 | 6,457 |
# Copyright (c) 2010-2018 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Utilities for writing code that runs on Python 2 and 3
WARNING: THIS VERSION OF SIX HAS BEEN MODIFIED.
Changed line 658:
def u(s):
s = s.replace(r'\\', r'\\\\')
if isinstance(s, unicode):
return s
else:
return unicode(s, "unicode_escape")
"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.12.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s, encoding=None):
return str(s)
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s, encoding="unicode_escape"):
if not isinstance(s, basestring):
s = unicode(s, encoding)
s = s.replace(r'\\', r'\\\\')
if isinstance(s, unicode):
return s
else:
return unicode(s, encoding)
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
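
# Illustrative usage (Meta and Base are invented names): both helpers attach a
# metaclass in a way that runs unchanged on Python 2 and 3.
#     @add_metaclass(Meta)
#     class MyClass(object):
#         pass
#     # or, via a synthesized temporary base class:
#     class MyOther(with_metaclass(Meta, Base)):
#         pass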
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| krathjen/studiolibrary | src/studiovendor/six.py | Python | lgpl-3.0 | 32,901 |
from pyactor.context import set_context, create_host, sleep, shutdown,\
serve_forever
class Someclass(object):
_tell = {'show_things'}
def __init__(self, op, thing):
self.things = [op, thing]
def show_things(self):
print(self.things)
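
# Note added for clarity (based on pyactor's documented conventions): methods
# listed in the _tell set are dispatched asynchronously (fire-and-forget);
# methods that must return a value would be declared in an _ask set instead.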
if __name__ == '__main__':
set_context()
h = create_host()
params = ["hi", "you"]
# kparams = {"op":"hi", "thing":"you"}
# t = h.spawn('t', Someclass, *params)
t = h.spawn('t', Someclass, *["hi", "you"])
t.show_things()
shutdown()
| pedrotgn/pyactor | examples/initparams.py | Python | lgpl-3.0 | 539 |
# *-* encoding: utf-8 *-*
import os
import codecs
import unicodedata
try:
from lxml import etree
except ImportError:
try:
# Python 2.5 - cElementTree
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5 - ElementTree
import xml.etree.ElementTree as etree
except ImportError:
try:
# Instalacao normal do cElementTree
import cElementTree as etree
except ImportError:
try:
# Instalacao normal do ElementTree
import elementtree.ElementTree as etree
except ImportError:
raise Exception('Falhou ao importar lxml/ElementTree')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import flags
# from geraldo.utils import memoize
# @memoize
def so_numeros(texto):
"""Retorna o texto informado mas somente os numeros"""
return ''.join(filter(lambda c: ord(c) in range(48, 58), texto))
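
# Illustrative example (input invented): so_numeros(u'CNPJ 12.345.678/0001-90')
# returns u'12345678000190'.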
# @memoize
def obter_pais_por_codigo(codigo):
# TODO
if codigo == '1058':
return 'Brasil'
CAMINHO_DATA = os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'data')
CAMINHO_MUNICIPIOS = os.path.join(CAMINHO_DATA, 'MunIBGE')
CARACTERS_ACENTUADOS = {
ord(u'á'): u'a',
ord(u'â'): u'a',
ord(u'à'): u'a',
ord(u'ã'): u'a',
ord(u'é'): u'e',
ord(u'ê'): u'e',
ord(u'í'): u'i',
ord(u'ó'): u'o',
ord(u'õ'): u'o',
ord(u'ô'): u'o',
ord(u'ú'): u'u',
ord(u'ç'): u'c',
}
# @memoize
def normalizar_municipio(municipio):
if not isinstance(municipio, unicode):
municipio = municipio.decode('utf-8')
return municipio.lower().translate(CARACTERS_ACENTUADOS).upper()
# @memoize
def carregar_arquivo_municipios(uf, reverso=False):
if isinstance(uf, basestring):
try:
uf = int(uf)
except ValueError:
uf = flags.CODIGOS_ESTADOS[uf.upper()]
caminho_arquivo = os.path.join(CAMINHO_MUNICIPIOS, 'MunIBGE-UF%s.txt' % uf)
    # Load the file contents
fp = codecs.open(caminho_arquivo, "r", "utf-8-sig")
linhas = list(fp.readlines())
fp.close()
municipios_dict = {}
for linha in linhas:
codigo, municipio = linha.split('\t')
codigo = codigo.strip()
municipio = municipio.strip()
if not reverso:
municipios_dict[codigo] = municipio
else:
municipios_dict[normalizar_municipio(municipio)] = codigo
return municipios_dict
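# Illustrative shape of the result (assumption; actual values depend on the
# bundled MunIBGE data files):
#   carregar_arquivo_municipios('SP') -> {u'3550308': u'São Paulo', ...}
#   carregar_arquivo_municipios('SP', reverso=True) -> {u'SAO PAULO': u'3550308', ...}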
# @memoize
def obter_codigo_por_municipio(municipio, uf):
    # TODO: make UF optional
municipios = carregar_arquivo_municipios(uf, True)
return municipios[normalizar_municipio(municipio)]
# @memoize
def obter_municipio_por_codigo(codigo, uf, normalizado=False):
    # TODO: make UF optional
municipios = carregar_arquivo_municipios(uf)
municipio = municipios.get(unicode(codigo))
if municipio is None:
raise ValueError
if normalizado:
return normalizar_municipio(municipio)
return municipio
# @memoize
def obter_municipio_e_codigo(dados, uf):
    '''Return the IBGE code and the normalized municipality name.
    dados - expects a dictionary in the format:
    {codigo: 121212, municipio: u'municipality name'}
    '''
cod = dados.get('codigo', '')
mun = normalizar_municipio(dados.get('municipio', ''))
try:
cod = int(cod)
except ValueError:
cod = obter_codigo_por_municipio(mun, uf)
    # TODO: if this check still raises errors in this section, build a return
    # value that tells the client which NF-es failed instead of blowing up
    # the generation of the remaining NF-es
municipio = obter_municipio_por_codigo(cod, uf, normalizado=True)
return cod, municipio
# @memoize
def extrair_tag(root):
return root.tag.split('}')[-1]
def formatar_decimal(dec):
if dec * 100 - int(dec * 100):
return str(dec)
else:
return "%.2f" % dec
def safe_str(str_):
if not isinstance(str_, unicode):
if isinstance(str_, str):
str_ = str_.decode('utf8')
else:
str_ = unicode(str_)
return unicodedata.normalize('NFKD', str_).encode('ascii', 'ignore')
def obter_uf_por_codigo(codigo_uf):
if isinstance(codigo_uf, basestring) and codigo_uf.isalpha():
return codigo_uf
estados = {v: k for k, v in flags.CODIGOS_ESTADOS.items()}
return estados[unicode(codigo_uf)]
| YACOWS/PyNFe | pynfe/utils/__init__.py | Python | lgpl-3.0 | 4,553 |
# (C) British Crown Copyright 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the NearsidePerspective projection.
"""
from __future__ import (absolute_import, division, print_function)
import unittest
from numpy.testing import assert_almost_equal
from nose.tools import assert_equal
from cartopy.tests.crs.test_geostationary import (GeostationaryTestsMixin,
check_proj4_params)
from cartopy.crs import NearsidePerspective
class TestEquatorialDefault(unittest.TestCase, GeostationaryTestsMixin):
# Check that it behaves just like Geostationary, in the absence of a
# central_latitude parameter.
test_class = NearsidePerspective
expected_proj_name = 'nsper'
class TestOwnSpecifics(unittest.TestCase):
def test_central_latitude(self):
# Check the effect of the added 'central_latitude' key.
geos = NearsidePerspective(central_latitude=53.7)
expected = ['+ellps=WGS84', 'h=35785831', 'lat_0=53.7', 'lon_0=0.0',
'no_defs',
'proj=nsper',
'units=m', 'x_0=0', 'y_0=0']
check_proj4_params(geos, expected)
assert_almost_equal(geos.boundary.bounds,
(-5372584.78443894, -5372584.78443894,
5372584.78443894, 5372584.78443894),
decimal=4)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| zak-k/cartopy | lib/cartopy/tests/crs/test_nearside_perspective.py | Python | lgpl-3.0 | 2,158 |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""utility methods and classes for reporters"""
import sys
import locale
import os
from .. import utils
CMPS = ['=', '-', '+']
# py3k has no more cmp builtin
if sys.version_info >= (3, 0):
def cmp(a, b):
return (a > b) - (a < b)
def diff_string(old, new):
    """given an old and a new int value, return a string representing the
    difference
    """
diff = abs(old - new)
diff_str = "%s%s" % (CMPS[cmp(old, new)], diff and ('%.2f' % diff) or '')
return diff_str
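# Illustrative behaviour (added comment, not in the original module):
#   diff_string(10, 12) -> '+2.00'   (value increased)
#   diff_string(12, 10) -> '-2.00'   (value decreased)
#   diff_string(5, 5)   -> '='       (no difference)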
class Message(object):
"""This class represent a message to be issued by the reporters"""
def __init__(self, reporter, msg_id, location, msg):
self.msg_id = msg_id
self.abspath, self.module, self.obj, self.line, self.column = location
self.path = self.abspath.replace(reporter.path_strip_prefix, '')
self.msg = msg
self.C = msg_id[0]
self.category = utils.MSG_TYPES[msg_id[0]]
self.symbol = reporter.linter.check_message_id(msg_id).symbol
def format(self, template):
"""Format the message according to the given template.
The template format is the one of the format method :
cf. http://docs.python.org/2/library/string.html#formatstrings
"""
return template.format(**(self.__dict__))
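    # Illustrative template (added comment; the attribute names are the ones
    # assigned in __init__ above):
    #   message.format('{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}')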
class BaseReporter(object):
"""base class for reporters
symbols: show short symbolic names for messages.
"""
extension = ''
def __init__(self, output=None):
self.linter = None
# self.include_ids = None # Deprecated
# self.symbols = None # Deprecated
self.section = 0
self.out = None
self.out_encoding = None
self.encode = None
self.set_output(output)
# Build the path prefix to strip to get relative paths
self.path_strip_prefix = os.getcwd() + os.sep
def add_message(self, msg_id, location, msg):
"""Client API to send a message"""
# Shall we store the message objects somewhere, do some validity checking ?
raise NotImplementedError
def set_output(self, output=None):
"""set output stream"""
self.out = output or sys.stdout
        # py3k streams handle their encoding:
if sys.version_info >= (3, 0):
self.encode = lambda x: x
return
def encode(string):
if not isinstance(string, unicode):
return string
encoding = (getattr(self.out, 'encoding', None) or
locale.getdefaultlocale()[1] or
sys.getdefaultencoding())
# errors=replace, we don't want to crash when attempting to show
# source code line that can't be encoded with the current locale
# settings
return string.encode(encoding, 'replace')
self.encode = encode
def writeln(self, string=''):
"""write a line in the output buffer"""
print >> self.out, self.encode(string)
def display_results(self, layout):
"""display results encapsulated in the layout tree"""
self.section = 0
if hasattr(layout, 'report_id'):
layout.children[0].children[0].data += ' (%s)' % layout.report_id
self._display(layout)
def _display(self, layout):
"""display the layout"""
raise NotImplementedError()
# Event callbacks
    def on_set_current_module(self, module, filepath):
        """starting analysis of a module"""
pass
    def on_close(self, stats, previous_stats):
        """global end of analysis"""
pass
def initialize(linter):
"""initialize linter with reporters in this package """
utils.register_plugins(linter, __path__[0])
| lukaszpiotr/pylama_with_gjslint | pylama/checkers/pylint/reporters/__init__.py | Python | lgpl-3.0 | 4,482 |
#!/usr/bin/env python
# -*- coding: utf-8 -*
import clutter
from clutter import cogl
import math
class Clock(clutter.Actor):
__gtype_name__ = 'Clock'
"""
A clock widget
"""
def __init__(self, date=None, texture=None):
clutter.Actor.__init__(self)
self._date = date
self._texture = texture
self._color = clutter.color_from_string('Black')
def set_color(self, color):
self._color = clutter.color_from_string(color)
self.queue_redraw()
def set_texture(self, texture):
self._texture = texture
self.queue_redraw()
def set_date(self, date=None):
self._date = date
if date is not None:
self.queue_redraw()
def do_paint(self):
#clutter.Texture.do_paint(self)
(x1, y1, x2, y2) = self.get_allocation_box()
width = x2 - x1
height = y2 - y1
hw = width / 2
hh = height / 2
center_x = hw
center_y = hh
# texture
if self._texture is not None:
cogl.path_rectangle(0, 0, width, height)
cogl.path_close()
cogl.set_source_texture(self._texture)
cogl.path_fill()
# clock hands
if self._date is not None:
hour = self._date.hour
minute = self._date.minute
# hour
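            # (added note) 30 degrees per hour plus 0.5 degrees per minute;
            # the +270 offset rotates the zero direction from the positive
            # x-axis ("3 o'clock") up to "12 o'clock". The minute hand below
            # uses 6 degrees per minute.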
angle = (60 * hour + minute) / 2 + 270
left = angle - 14
right = angle + 14
angle = angle * (math.pi / 180)
left = left * (math.pi / 180)
right = right * (math.pi / 180)
cogl.path_move_to(center_x, center_y)
cogl.path_line_to(center_x + (hw/4) * math.cos(left), center_y + (hh/4) * math.sin(left))
cogl.path_line_to(center_x + (2*hw/3) * math.cos(angle), center_y + (2*hh/3) * math.sin(angle))
cogl.path_line_to(center_x + (hw/4) * math.cos(right), center_y + (hh/4) * math.sin(right))
cogl.path_line_to(center_x, center_y)
cogl.path_close()
cogl.set_source_color(self._color)
cogl.path_fill()
# minute
angle = 6 * minute + 270
left = angle - 10
right = angle + 10
angle = angle * (math.pi / 180)
left = left * (math.pi / 180)
right = right * (math.pi / 180)
cogl.path_move_to(center_x, center_y)
cogl.path_line_to(center_x + (hw/3) * math.cos(left), center_y + (hh/3) * math.sin(left))
cogl.path_line_to(center_x + hw * math.cos(angle), center_y + hh * math.sin(angle))
cogl.path_line_to(center_x + (hw/3) * math.cos(right), center_y + (hh/3) * math.sin(right))
cogl.path_line_to(center_x, center_y)
cogl.path_close()
cogl.set_source_color(self._color)
cogl.path_fill()
#main to test
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy',clutter.main_quit)
import gobject, datetime
t = cogl.texture_new_from_file('clock.png', clutter.cogl.TEXTURE_NO_SLICING, clutter.cogl.PIXEL_FORMAT_ANY)
c = Clock()
c.set_texture(t)
c.set_size(400, 400)
c.set_position(50, 50)
stage.add(c)
def update():
today = datetime.datetime.today()
#self.actor.set_text(today.strftime('%H:%M\n%d / %m'))
c.set_date(today)
return True
gobject.timeout_add_seconds(60, update)
stage.show()
clutter.main()
| UbiCastTeam/candies | candies2/clock.py | Python | lgpl-3.0 | 3,642 |
import itertools
import re
import random
import time
import urllib2
from bs4 import BeautifulSoup
import csv
import os
import os.path
import string
import pg
from collections import OrderedDict
import tweepy
import sys
# lint_ignore=E302,E501
_dir = os.path.dirname(os.path.abspath(__file__))
_cur = pg.connect(host="127.0.0.1")
_topHashtagsDir = "%s/dissertationData/topHashtags" % (_dir)
def getTweepyAPI():
consumer_key = "vKbz24SqytZnYO33FNkR7w"
consumer_secret = "jjobro8Chy9aKMzo8szYMz9tHftONLRkjNnrxk0"
access_key = "363361813-FKSdmwSbzuUzHWg326fTGJM7Bu2hTviqEetjMgu8"
access_secret = "VKgzDnTvDUWR1csliUR3BiMOI2oqO9NzocNKX1jPd4"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
return tweepy.API(auth)
_api = getTweepyAPI()
def isRetweet(tweet):
return hasattr(tweet, 'retweeted_status')
def write2csv(res, file):
print "writing %s results to file: %s" % (len(res), file)
with open(file, 'wb') as f:
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
writer.writerows(res)
def myQuery(query):
print "running query: %s" % (query)
res = _cur.query(query)
print "finished running query"
return(res)
def getTweetObj(tweet):
tweetObj = [tweet.id_str, tweet.user.id, tweet.user.screen_name.lower(), tweet.created_at, isRetweet(tweet), tweet.in_reply_to_status_id_str,
tweet.lang, tweet.truncated, tweet.text.encode("utf-8")]
return tweetObj
class CustomStreamListener(tweepy.StreamListener):
def __init__(self, group):
self.group = group
self.curTweets = []
# http://stackoverflow.com/questions/576169/understanding-python-super-and-init-methods
super(CustomStreamListener, self).__init__()
def addTweet(self, tweet):
tweetObj = getTweetObj(tweet)
tweetObj.append(self.group)
self.curTweets.append(tweetObj)
if len(self.curTweets) == 1000:
self.saveResults()
self.curTweets = []
def saveResults(self):
sys.stdout.write('\n')
        file = '/tmp/topHashtags.csv'
ids = [tweet[0] for tweet in self.curTweets]
ids = list(OrderedDict.fromkeys(ids))
ids = [[id] for id in ids]
myQuery('truncate temp_tweets_id')
write2csv(ids, file)
myQuery("copy temp_tweets_id (id) from '%s' delimiters ',' csv" % (file))
newIds = myQuery("select id from temp_tweets_id as t where t.id not in (select id from top_hashtag_tweets where top_hashtag_tweets.id >= (select min(id) from temp_tweets_id))").getresult()
newIds = [id[0] for id in newIds]
newIds = [str(id) for id in newIds]
newTweets = [tweet for tweet in self.curTweets if tweet[0] in newIds]
newTweets = dict((tweet[0], tweet) for tweet in newTweets).values()
write2csv(newTweets, file)
myQuery("copy top_hashtag_tweets (id, user_id, user_screen_name, created_at, retweeted, in_reply_to_status_id, lang, truncated, text, hashtag_group) from '%s' delimiters ',' csv" % (file))
def on_status(self, status):
sys.stdout.write('.')
sys.stdout.flush()
self.addTweet(status)
def on_error(self, status_code):
print >> sys.stderr, 'error: %s' % (repr(status_code))
return True
def on_timeout(self):
print >> sys.stderr, 'Timeout...'
return True
def scrapeTrendsmap():
baseUrl = 'http://trendsmap.com'
url = baseUrl + '/local'
soup = BeautifulSoup(urllib2.urlopen(url).read())
#res = soup.findAll('div', {'class': 'location'})
res = soup.findAll('a', {'href': re.compile('^\/local\/us')})
cityUrls = [baseUrl + item['href'] for item in res]
allHashtags = []
for rank, url in enumerate(cityUrls):
print "working url %s, %s/%s" % (url, rank, len(cityUrls))
soup = BeautifulSoup(urllib2.urlopen(url).read())
res = soup.findAll('a', {'class': 'obscure-text', 'title': re.compile('^#')})
hashtags = [item['title'].encode('utf-8') for item in res]
allHashtags.extend(hashtags)
time.sleep(2 + random.random())
allHashtags = list(OrderedDict.fromkeys(allHashtags))
random.shuffle(allHashtags)
return allHashtags
def generateTopHashtagsTrendsmap():
res = scrapeTrendsmap()
return res
def scrapeStatweestics():
url = 'http://statweestics.com/stats/hashtags/day'
soup = BeautifulSoup(urllib2.urlopen(url).read())
res = []
for hrefEl in soup.findAll('a', {'href': re.compile('^\/stats\/show')}):
res.append(hrefEl.contents[0].encode('utf-8'))
return res
def generateTopHashtagsStatweestics():
res = scrapeStatweestics()
return res
def generateTopHashtagsCSV(scrapeFun, group):
res = []
for rank, item in enumerate(scrapeFun()):
res.append([item, rank, group])
file = "%s/%s.csv" % (_topHashtagsDir, group)
write2csv(res, file)
def storeTopHashtags(topHashtagsFile):
cmd = "copy top_hashtag_hashtags (hashtag, rank, hashtag_group) from '%s/%s.csv' delimiters ',' csv" % (_topHashtagsDir, topHashtagsFile)
myQuery(cmd)
def generateTopHashtags(scrapeFun=generateTopHashtagsTrendsmap, groupID='trendsmap'):
hashtagGroup = '%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S"), groupID)
generateTopHashtagsCSV(scrapeFun, hashtagGroup)
storeTopHashtags(hashtagGroup)
def getHashtagsFrom(group):
res = myQuery("select hashtag from top_hashtag_hashtags where hashtag_group = '%s' order by rank asc" % (group)).getresult()
res = [item[0] for item in res]
res = [hashtag for hashtag in res if sys.getsizeof(hashtag) <= 60]
res = res[-400:]
return res
def streamHashtags(hashtagGroup):
while True:
try:
sapi = tweepy.streaming.Stream(_api.auth, CustomStreamListener(hashtagGroup))
sapi.filter(languages=['en'], track=getHashtagsFrom('%s' % (hashtagGroup)))
except Exception as e:
print "couldn't do it for %s:" % (e)
time.sleep(1)
pass
def streamHashtagsCurrent():
#hashtagGroup = '2014-02-27 17:13:30 initial'
#hashtagGroup = '2014-03-17 11:28:15 trendsmap'
#hashtagGroup = '2014-03-24 13:06:19 trendsmap'
hashtagGroup = '2014-04-04 15:03:59 trendsmap'
streamHashtags(hashtagGroup)
def scrape_socialbakers(url):
soup = BeautifulSoup(urllib2.urlopen(url).read())
res = []
for div in soup.findAll('div', {'id': 'snippet-bookmarkToggle-bookmarkToggle'}):
res.append(div.findAll('div')[0]['id'].split('-')[-1])
print "grabbed %s results from url %s" % (len(res), url)
return res
def scrape_twitaholic(url):
soup = BeautifulSoup(urllib2.urlopen(url).read())
res = []
for tr in soup.findAll('tr', {'style': 'border-top:1px solid black;'}):
temp = tr.find('td', {'class': 'statcol_name'})
res.append(temp.a['title'].split('(')[1][4:-1])
return res
def generateTopUsersTwitaholic():
res = []
for i in range(10):
i = i + 1
url = 'http://twitaholic.com/top' + str(i) + '00/followers/'
res.append(scrape_twitaholic(url))
return res
def generateTopUsersSocialBakers(numUsers=10000):
res = []
for i in range(numUsers / 50):
url = 'http://socialbakers.com/twitter/page-' + str(i + 1) + '/'
res.append(scrape_socialbakers(url))
return res
_topUsersDir = "%s/dissertationData/topRankedUsers" % (_dir)
def generateTopUsersCSV(scrapeFun, topUsersFile):
res = scrapeFun()
res = list(itertools.chain(*res))
res = [x.lower() for x in res]
res = OrderedDict.fromkeys(res)
res = filter(None, res)
with open("%s/%s" % (_topUsersDir, topUsersFile), 'wb') as csvfile:
csvWriter = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
rank = 0
for item in res:
rank = rank + 1
csvWriter.writerow([item, rank])
def storeTopUsers(topUsersFile):
topUsersDir = _topUsersDir
cmd = "copy topUsers (user_screen_name, rank) from '${topUsersDir}/${topUsersFile}' delimiters ',' csv"
cmd = string.Template(cmd).substitute(locals())
myQuery(cmd)
def generateTopUsers(scrapeFun=generateTopUsersTwitaholic, topUsersFile='top1000Twitaholic.csv'):
generateTopUsersCSV(scrapeFun=scrapeFun, topUsersFile=topUsersFile)
storeTopUsers(topUsersFile=topUsersFile)
def storeTagSynonyms(synonymsFile):
cmd = "copy tag_synonyms (%s) from '%s/dissertationData/tagSynonyms/%s' delimiters ',' csv header" % (
"id, Source_Tag_Name, Target_Tag_Name, Creation_Date, Owner_User_Id, Auto_Rename_Count, Last_Auto_Rename, Score, Approved_By_User_Id, Approval_Date",
_dir, synonymsFile)
myQuery(cmd)
def storeCurTagSynonyms():
storeTagSynonyms('synonyms-2014-01-30.csv')
def backupTables(tableNames=['topUsers', 'tweets', 'top_hashtag_hashtags', 'top_hashtag_tweets', 'post_subsets', 'top_hashtag_subsets',
'post_tokenized', 'top_hashtag_tokenized', 'post_filtered', 'twitter_users', 'tag_synonyms', 'users', 'posts',
'post_tokenized_type_types', 'top_hashtag_tokenized_type_types', 'post_tokenized_chunk_types', 'top_hashtag_tokenized_chunk_types',
'tweets_tokenized', 'tweets_tokenized_chunk_types', 'tweets_tokenized_type_types']):
for tableName in tableNames:
file = "%s/dissertationData/tables/%s.csv" % (_dir, tableName)
cmd = string.Template("copy ${tableName} to '${file}' delimiter ',' csv header").substitute(locals())
myQuery(cmd)
def getRemainingHitsUserTimeline():
stat = _api.rate_limit_status()
return stat['resources']['statuses']['/statuses/user_timeline']['remaining']
def getRemainingHitsGetUser():
stat = _api.rate_limit_status()
return stat['resources']['users']['/users/lookup']['remaining']
def getTweets(screen_name, **kwargs):
# w.r.t include_rts: ref: https://dev.twitter.com/docs/api/1.1/get/statuses/user_timeline
# When set to false, the timeline will strip any native retweets (though they
# will still count toward both the maximal length of the timeline and the slice
# selected by the count parameter).
return _api.user_timeline(screen_name=screen_name, include_rts=True, **kwargs)
def getInfoForUser(screenNames):
users = _api.lookup_users(screen_names=screenNames)
res = [[user.id, user.created_at, user.description.encode('utf-8'), user.followers_count, user.friends_count,
user.lang, user.location.encode('utf-8'), user.name.encode('utf-8'), user.screen_name.lower(), user.verified, user.statuses_count] for user in users]
file = '/tmp/%s..%s_user.csv' % (screenNames[0], screenNames[-1])
write2csv(res, file)
myQuery("copy twitter_users (id,created_at,description,followers_count,friends_count,lang,location,name,user_screen_name,verified,statuses_count) from '%s' delimiters ',' csv" % (file))
def getAllTweets(screenNames):
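    # (added note) Two-phase collection: first fetch tweets newer than the
    # newest id already stored for this user, then backfill tweets older
    # than the oldest stored id, paging with max_id/since_id windows.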
def getTweetsBetween(greaterThanID, lessThanID):
alltweets = []
while True:
print "getting tweets from %s that are later than %s but before %s" % (screen_name, greaterThanID, lessThanID)
newTweets = getTweets(screen_name, count=200, max_id=lessThanID - 1, since_id=greaterThanID + 1)
if len(newTweets) == 0:
break
alltweets.extend(newTweets)
lessThanID = alltweets[-1].id
print "...%s tweets downloaded so far" % (len(alltweets))
return alltweets
assert len(screenNames) == 1, "Passed more than one screen name into function"
screen_name = screenNames[0]
print "getting tweets for %s" % (screen_name)
alltweets = []
lessThanID = getTweets(screen_name, count=1)[-1].id + 1
cmd = string.Template("select id from tweets where user_screen_name = '${screen_name}' order by id desc").substitute(locals())
res = myQuery(cmd).getresult()
if len(res) == 0:
newestGrabbed = 0
else:
newestGrabbed = int(res[0][0])
res = getTweetsBetween(newestGrabbed, lessThanID)
alltweets.extend(res)
cmd = string.Template("select id from tweets where user_screen_name = '${screen_name}' order by id asc").substitute(locals())
res = myQuery(cmd).getresult()
if len(res) == 0:
lessThanID = 0
else:
lessThanID = int(res[0][0])
alltweets.extend(getTweetsBetween(0, lessThanID))
outTweets = [getTweetObj(tweet) for tweet in alltweets]
file = '/tmp/%s_tweets.csv' % screen_name
write2csv(outTweets, file)
myQuery("copy tweets (id, user_id, user_screen_name, created_at, retweeted, in_reply_to_status_id, lang, truncated,text) from '%s' delimiters ',' csv" % (file))
def userAlreadyCollected(user_screen_name):
res = myQuery(string.Template("select * from tweets where user_screen_name='${user_screen_name}' limit 1").substitute(locals())).getresult()
return len(res) > 0
def userInfoAlreadyCollected(user_screen_name):
res = myQuery(string.Template("select * from twitter_users where user_screen_name='${user_screen_name}' limit 1").substitute(locals())).getresult()
return len(res) > 0
# ref: http://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks/434411#434411
def chunker(seq, size):
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
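# Illustrative example (added comment):
#   list(chunker([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
# getForTopUsers below is the generic driver: it skips already-collected
# users, batches screen names with the grouping function, waits for
# rate-limit headroom, then invokes the collector on each batch.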
def getForTopUsers(alreadyCollectedFun, getForUserFun, getRemainingHitsFun, hitsAlwaysGreaterThan, userQuery, groupFun=lambda x: chunker(x, 1)):
res = myQuery(userQuery).getresult()
screenNames = [[user[0]] for user in res]
screenNames = list(itertools.chain(*screenNames))
print "getting tweets for %s users" % len(screenNames)
screenNameGroups = groupFun(screenNames)
for screenNameGroup in screenNameGroups:
newScreenNames = []
for screenName in screenNameGroup:
if alreadyCollectedFun(screenName):
print "already collected tweets for %s; moving to next user" % (screenName)
continue
newScreenNames.append(screenName)
if len(newScreenNames) == 0:
continue
try:
while True:
remainingHits = getRemainingHitsFun()
if remainingHits > hitsAlwaysGreaterThan:
break
print "only %s remaining hits; waiting until greater than %s" % (remainingHits, hitsAlwaysGreaterThan)
time.sleep(60)
print "calling %s with %s at %s remaining hits" % (getForUserFun, newScreenNames, remainingHits)
getForUserFun(newScreenNames)
except Exception as e:
print "couldn't do it for %s: %s" % (newScreenNames, e)
time.sleep(1)
pass
def getAllTweetsDefault(userQuery):
return getForTopUsers(alreadyCollectedFun=userAlreadyCollected, getForUserFun=getAllTweets, getRemainingHitsFun=getRemainingHitsUserTimeline, hitsAlwaysGreaterThan=30, userQuery=userQuery)
def makeUserQuery(val, col):
return 'select user_screen_name from twitter_users where %s > %d order by %s asc limit 1000' % (col, val, col)
def makeUserQueryFollowers(val):
return makeUserQuery(val, 'followers_count')
def makeUserQueryTweets(val):
return makeUserQuery(val, 'statuses_count')
def getAllTweetsFor10MUsers():
return getAllTweetsDefault(makeUserQueryFollowers(10000000))
def getAllTweetsFor1MUsers():
return getAllTweetsDefault(makeUserQueryFollowers(1000000))
def getAllTweetsFor100kUsers():
return getAllTweetsDefault(makeUserQueryFollowers(100000))
def getAllTweetsFor10kUsers():
return getAllTweetsDefault(makeUserQueryFollowers(10000))
def getAllTweetsFor5kUsers():
return getAllTweetsDefault(makeUserQueryFollowers(5000))
def getAllTweetsFor1kUsers():
return getAllTweetsDefault(makeUserQueryFollowers(1000))
def getAllTweetsForS1e2Users():
return getAllTweetsDefault(makeUserQueryTweets(100))
def getAllTweetsForS5e2Users():
return getAllTweetsDefault(makeUserQueryTweets(500))
def getAllTweetsForS1e3Users():
return getAllTweetsDefault(makeUserQueryTweets(1000))
def getAllTweetsForS5e3Users():
return getAllTweetsDefault(makeUserQueryTweets(5000))
def getAllTweetsForS1e4Users():
return getAllTweetsDefault(makeUserQueryTweets(10000))
def getAllTweetsForS5e4Users():
return getAllTweetsDefault(makeUserQueryTweets(50000))
def getAllTweetsForTopUsersByFollowers():
getAllTweetsFor10MUsers()
getAllTweetsFor1MUsers()
getAllTweetsFor100kUsers()
getAllTweetsFor10kUsers()
getAllTweetsFor1kUsers()
getAllTweetsFor5kUsers()
def getAllTweetsForTopUsersByTweets():
getAllTweetsForS1e2Users()
getAllTweetsForS5e2Users()
getAllTweetsForS1e3Users()
getAllTweetsForS5e3Users()
getAllTweetsForS1e4Users()
getAllTweetsForS5e4Users()
def getUserInfoForTopUsers():
getForTopUsers(alreadyCollectedFun=userInfoAlreadyCollected, getForUserFun=getInfoForUser, getRemainingHitsFun=getRemainingHitsGetUser, hitsAlwaysGreaterThan=30, groupFun=lambda x: chunker(x, 100),
userQuery='select (user_screen_name) from topUsers order by rank asc limit 100000')
def generateTopUsers100k():
generateTopUsers(scrapeFun=lambda: generateTopUsersSocialBakers(numUsers=100000), topUsersFile='top100000SocialBakers.csv')
def backupTopHashtags():
backupTables(tableNames=['top_hashtag_hashtags',
'top_hashtag_subsets',
'top_hashtag_tokenized',
'top_hashtag_tokenized_chunk_types',
'top_hashtag_tokenized_type_types',
'top_hashtag_tweets'])
def backupTweets():
backupTables(tableNames=['tweets',
'tweets_tokenized',
'tweets_tokenized_chunk_types',
'tweets_tokenized_type_types'])
# Current run selections
#generateTopUsers100k()
#getAllTweetsForTopUsersByFollowers()
#getAllTweetsForTopUsersByTweets()
#getUserInfoForTopUsers()
#storeCurTagSynonyms()
#backupTopHashtags()
#backupTables()
#generateTopHashtags()
#streamHashtagsCurrent()
if __name__ == "__main__":
command = " ".join(sys.argv[1:])
    print('running command %s' % (command))
eval(command)
| claytontstanley/dissertation | dissProject/scrapeUsers.py | Python | lgpl-3.0 | 18,399 |
#!/usr/bin/env python
#
# Copyright (C) 2015 Jonathan Racicot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
#
# You are free to use and modify this code for your own software
# as long as you retain information about the original author
# in your code as shown below.
#
# <author>Jonathan Racicot</author>
# <email>[email protected]</email>
# <date>2015-03-26</date>
# <url>https://github.com/infectedpacket</url>
#//////////////////////////////////////////////////////////
# Program Information
#
PROGRAM_NAME = "vmfcat"
PROGRAM_DESC = ""
PROGRAM_USAGE = "%(prog)s [-i] [-h|--help] (OPTIONS)"
__version_info__ = ('0','1','0')
__version__ = '.'.join(__version_info__)
#//////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////
# Imports Statements
import re
import sys
import json
import argparse
import traceback
from Factory import *
from Logger import *
from bitstring import *
#//////////////////////////////////////////////////////////
# =============================================================================
# Parameter information
class Params:
parameters = {
"debug" : {
"cmd" : "debug",
"help" : "Enables debug mode.",
"choices" : [True, False]
},
"data" : {
"cmd" : "data",
"help" : "Specifies a file containing data to be included in the VMF message.",
"choices" : []
},
"vmfversion" : {
"cmd" : "vmfversion",
"help" :
"""Field representing the version of the MIL-STD-2045-47001 header being used for the message.""",
"choices" : ["std47001", "std47001b","std47001c","std47001d","std47001d_change"]
},
"compress" : {
"cmd" : "compress",
"help" :
"""This field represents whether the message or messages contained in the User Data portion of the Application PDU have been UNIX compressed or compressed using GZIP.""",
"choices" : ["unix", "gzip"]
},
"headersize" : {
"cmd" : "headersize",
"help" :
"""Indicates the size in octets of the header""",
"choices" : []
},
"originator_urn" : {
"cmd" : "originator_urn",
"help" : """24-bit code used to uniquely identify friendly military units, broadcast networks and multicast groups.""",
"choices" : []
},
"originator_unitname" : {
"cmd" : "originator_unitname",
"help" : """Specify the name of the unit sending the message.""",
"choices" : []
},
"rcpt_urns" : {
"cmd" : "rcpt_urns",
"help" : """List of 24-bit codes used to uniquely identify friendly units.""",
"choices" : []
},
"rcpt_unitnames" : {
"cmd" : "rcpt_unitnames",
"help" : """ List of variable size fields of character-coded identifiers for friendly units. """,
"choices" : []
},
"info_urns" : {
"cmd" : "info_urns",
"help" : """List of 24-bit codes used to uniquely identify friendly units.""",
"choices" : []
},
"info_unitnames" : {
"cmd" : "info_unitnames",
"help" : """ List of variable size fields of character-coded identifiers for friendly units. """,
"choices" : []
},
"umf" : {
"cmd" : "umf",
"choices" : ["link16", "binary", "vmf", "nitfs", "rdm", "usmtf", "doi103", "xml-mtf", "xml-vmf"],
"help" : """ Indicates the format of the message contained in the user data field."""
},
"messagevers" : {
"cmd" : "messagevers",
"choices" : [],
"help" : """Represents the version of the message standard contained in the user data field."""
},
"fad" : {
"cmd" : "fad",
"choices" : ["netcon", "geninfo", "firesp", "airops", "intops", "landops","marops", "css", "specialops", "jtfopsctl", "airdef"],
"help" : "Identifies the functional area of a specific VMF message using code words."
},
"msgnumber" : {
"cmd" : "msgnumber",
"choices" : [],
"help" : """Represents the number that identifies a specific VMF message within a functional area."""
},
"msgsubtype" : {
"cmd" : "msgsubtype",
"choices" : [],
"help" : """Represents a specific case within a VMF message, which depends on the UMF, FAD and message number."""
},
"filename" : {
"cmd" : "filename",
"choices" : [],
"help" : """Indicates the name of the computer file or data block contained in the User Data portion of the application PDU."""
},
"msgsize" : {
"cmd" : "msgsize",
"choices" : [],
"help" : """Indicates the size(in bytes) of the associated message within the User Data field."""
},
"opind" : {
"cmd" : "opind",
"choices" : ["op", "ex", "sim", "test"],
"help" : "Indicates the operational function of the message."
},
"retransmission" : {
"cmd" : "retransmission",
"choices" : [1, 0],
"help" : """Indicates whether a message is a retransmission."""
},
"msgprecedence" : {
"cmd" : "msgprecedence",
"choices" : ["reserved", "critic", "flashover", "flash", "imm", "pri", "routine"],
"help" : """Indicates relative precedence of a message."""
},
"classification" : {
"cmd" : "classification",
"choices" : ["unclass", "conf", "secret", "topsecret"],
"help" : """Security classification of the message."""
},
"releasemark" : {
"cmd" : "releasemark",
"choices" : [],
"help" : """Support the exchange of a list of up to 16 country codes with which the message can be release."""
},
"originatordtg" : {
"cmd" : "originatordtg",
"choices" : [],
"help" : """ Contains the date and time in Zulu Time that the message was prepared."""
},
"perishdtg" : {
"cmd" : "perishdtg",
"choices" : [],
"help" : """Provides the latest time the message is still of value."""
},
"ackmachine" : {
"cmd" : "ackmachine",
"choices" : [1, 0],
"help" : """Indicates whether the originator of a machine requires a machine acknowledgement for the message."""
},
"ackop" : {
"cmd" : "ackop",
"choices" : [1, 0],
"help" : """Indicates whether the originator of the message requires an acknowledgement for the message from the recipient."""
},
"ackdtg" : {
"cmd" : "ackdtg",
"choices" : [],
"help" : """Provides the date and time of the original message that is being acknowledged."""
},
"rc" : {
"cmd" : "rc",
"choices" : ["mr", "cantpro", "oprack", "wilco", "havco", "cantco", "undef"],
"help" : """Codeword representing the Receipt/Compliance answer to the acknowledgement request."""
},
"cantpro" : {
"cmd" : "cantpro",
"choices" : [],
"help" : """Indicates the reason that a particular message cannot be processed by a recipient or information address."""
},
"reply" : {
"cmd" : "reply",
"choices" : [1, 0],
"help" : """Indicates whether the originator of the message requires an operator reply to the message."""
},
"cantco" : {
"cmd" : "cantco",
"choices" : ["comm", "ammo", "pers", "fuel", "env", "equip", "tac", "other"],
"help" : """Indicates the reason that a particular recipient cannot comply with a particular message."""
},
"replyamp" : {
"cmd" : "replyamp",
"choices" : [],
"help" : """Provide textual data an amplification of the recipient's reply to a message."""
},
"ref_urn" : {
"cmd" : "ref_urn",
"choices" : [],
"help" : """URN of the reference message."""
},
"ref_unitname" : {
"cmd" : "ref_unitname",
"choices" : [],
"help" : """Name of the unit of the reference message."""
},
"refdtg" : {
"cmd" : "refdtg",
"choices" : [],
"help" : """Date time group of the reference message."""
},
"secparam" : {
"cmd" : "secparam",
"choices" : ['auth', 'undef'],
"help" : """Indicate the identities of the parameters and algorithms that enable security processing."""
},
"keymatlen" : {
"cmd" : "keymatlen",
"choices" : [],
"help" : """Defines the size in octets of the Keying Material ID field."""
},
"keymatid" : {
"cmd" : "keymatid",
"choices" : [],
"help" : """Identifies the key which was used for encryption."""
},
"crypto_init_len" : {
"cmd" : "crypto_init_len",
"choices" : [],
"help" : """Defines the size, in 64-bit blocks, of the Crypto Initialization field."""
},
"crypto_init" : {
"cmd" : "crypto_init",
"choices" : [],
"help" : """Sequence of bits used by the originator and recipient to initialize the encryption/decryption process."""
},
"keytok_len" : {
"cmd" : "keytok_len",
"choices" : [],
"help" : """Defines the size, in 64-bit blocks, of the Key Token field."""
},
"keytok" : {
"cmd" : "keytok",
"choices" : [],
"help" : """Contains information enabling each member of each address group to decrypt the user data associated with this message header."""
},
"autha-len" : {
"cmd" : "autha-len",
"choices" : [],
"help" : """Defines the size, in 64-bit blocks, of the Authentification Data (A) field."""
},
"authb-len" : {
"cmd" : "authb-len",
"choices" : [],
"help" : """Defines the size, in 64-bit blocks, of the Authentification Data (B) field."""
},
"autha" : {
"cmd" : "autha",
"choices" : [],
"help" : """Data created by the originator to provide both connectionless integrity and data origin authentication (A)."""
},
"authb" : {
"cmd" : "authb",
"choices" : [],
"help" : """Data created by the originator to provide both connectionless integrity and data origin authentication (B)."""
},
"acksigned" : {
"cmd" : "acksigned",
"choices" : [],
"help" : """Indicates whether the originator of a message requires a signed response from the recipient."""
},
"pad_len" : {
"cmd" : "pad_len",
"choices" : [],
"help" : """Defines the size, in octets, of the message security padding field."""
},
"padding" : {
"cmd" : "padding",
"choices" : [],
"help" : """Necessary for a block encryption algorithm so the content of the message is a multiple of the encryption block length."""
},
}
#//////////////////////////////////////////////////////////////////////////////
# Argument Parser Declaration
#
usage = "%(prog)s [options] data"
parser = argparse.ArgumentParser(usage=usage,
prog="vmfcat",
version="%(prog)s "+__version__,
description="Allows crafting of Variable Message Format (VMF) messages.")
io_options = parser.add_argument_group(
"Input/Output Options", "Types of I/O supported.")
io_options.add_argument("-d", "--debug",
dest=Params.parameters['debug']['cmd'],
action="store_true",
help=Params.parameters['debug']['help'])
io_options.add_argument("-i", "--interactive",
dest="interactive",
action="store_true",
help="Create and send VMF messages interactively.")
io_options.add_argument("-of", "--ofile",
dest="outputfile",
nargs="?",
type=argparse.FileType('w'),
default=sys.stdout,
help="File to output the results. STDOUT by default.")
io_options.add_argument("--data",
dest=Params.parameters['data']['cmd'],
help=Params.parameters['data']['help'])
# =============================================================================
# Application Header Arguments
header_options = parser.add_argument_group(
"Application Header", "Flags and Fields of the application header.")
header_options.add_argument("--vmf-version",
dest=Params.parameters["vmfversion"]["cmd"],
action="store",
choices=Params.parameters["vmfversion"]["choices"],
default="std47001c",
help=Params.parameters["vmfversion"]["help"])
header_options.add_argument("--compress",
dest=Params.parameters["compress"]["cmd"],
action="store",
choices=Params.parameters["compress"]["choices"],
help=Params.parameters["compress"]["help"])
header_options.add_argument("--header-size",
dest=Params.parameters["headersize"]["cmd"],
action="store",
type=int,
help=Params.parameters["headersize"]["help"])
# =============================================================================
# Originator Address Group Arguments
orig_addr_options = parser.add_argument_group(
"Originator Address Group", "Fields of the originator address group.")
orig_addr_options.add_argument("--orig-urn",
dest=Params.parameters["originator_urn"]["cmd"],
metavar="URN",
type=int,
action="store",
help=Params.parameters["originator_urn"]["help"])
orig_addr_options.add_argument("--orig-unit",
dest=Params.parameters["originator_unitname"]["cmd"],
metavar="STRING",
action="store",
help=Params.parameters["originator_unitname"]["help"])
# =============================================================================
# =============================================================================
# Recipient Address Group Arguments
recp_addr_options = parser.add_argument_group(
"Recipient Address Group", "Fields of the recipient address group.")
recp_addr_options.add_argument("--rcpt-urns",
nargs="+",
dest=Params.parameters['rcpt_urns']['cmd'],
metavar="URNs",
help=Params.parameters['rcpt_urns']['help'])
recp_addr_options.add_argument("--rcpt-unitnames",
nargs="+",
dest=Params.parameters['rcpt_unitnames']['cmd'],
metavar="UNITNAMES",
help=Params.parameters['rcpt_unitnames']['help'])
# =============================================================================
# =============================================================================
# Information Address Group Arguments
info_addr_options = parser.add_argument_group(
"Information Address Group", "Fields of the information address group.")
info_addr_options.add_argument("--info-urns",
dest=Params.parameters["info_urns"]["cmd"],
metavar="URNs",
nargs="+",
action="store",
help=Params.parameters["info_urns"]["help"])
info_addr_options.add_argument("--info-units",
dest="info_unitnames",
metavar="UNITNAMES",
action="store",
help="Specify the name of the unit of the reference message.")
# =============================================================================
# =============================================================================
# Message Handling Group Arguments
msg_handling_options = parser.add_argument_group(
"Message Handling Group", "Fields of the message handling group.")
msg_handling_options.add_argument("--umf",
dest=Params.parameters["umf"]["cmd"],
action="store",
choices=Params.parameters["umf"]["choices"],
help=Params.parameters["umf"]["help"])
msg_handling_options.add_argument("--msg-version",
dest=Params.parameters["messagevers"]["cmd"],
action="store",
metavar="VERSION",
type=int,
help=Params.parameters["messagevers"]["help"])
msg_handling_options.add_argument("--fad",
dest=Params.parameters["fad"]["cmd"],
action="store",
choices=Params.parameters["fad"]["choices"],
help=Params.parameters["fad"]["help"])
msg_handling_options.add_argument("--msg-number",
dest=Params.parameters["msgnumber"]["cmd"],
action="store",
type=int,
metavar="1-127",
help=Params.parameters["msgnumber"]["help"])
msg_handling_options.add_argument("--msg-subtype",
dest=Params.parameters["msgsubtype"]["cmd"],
action="store",
type=int,
metavar="1-127",
help=Params.parameters["msgsubtype"]["help"])
msg_handling_options.add_argument("--filename",
dest=Params.parameters["filename"]["cmd"],
action="store",
help=Params.parameters["filename"]["help"])
msg_handling_options.add_argument("--msg-size",
dest=Params.parameters["msgsize"]["cmd"],
action="store",
type=int,
metavar="SIZE",
help=Params.parameters["msgsize"]["help"])
msg_handling_options.add_argument("--opind",
dest=Params.parameters["opind"]["cmd"],
action="store",
choices=Params.parameters["opind"]["choices"],
help=Params.parameters["opind"]["help"])
msg_handling_options.add_argument("--retrans",
dest=Params.parameters["retransmission"]["cmd"],
action="store_true",
help=Params.parameters["retransmission"]["help"])
msg_handling_options.add_argument("--msg-prec",
dest=Params.parameters["msgprecedence"]["cmd"],
action="store",
choices=Params.parameters["msgprecedence"]["choices"],
help=Params.parameters["msgprecedence"]["help"])
msg_handling_options.add_argument("--class",
dest=Params.parameters["classification"]["cmd"],
action="store",
nargs="+",
choices=Params.parameters["classification"]["choices"],
    help=Params.parameters["classification"]["help"])
msg_handling_options.add_argument("--release",
dest=Params.parameters["releasemark"]["cmd"],
action="store",
metavar="COUNTRIES",
help=Params.parameters["releasemark"]["help"])
msg_handling_options.add_argument("--orig-dtg",
dest=Params.parameters["originatordtg"]["cmd"],
action="store",
metavar="YYYY-MM-DD HH:mm[:ss] [extension]",
    help=Params.parameters["originatordtg"]["help"])
msg_handling_options.add_argument("--perish-dtg",
dest=Params.parameters["perishdtg"]["cmd"],
action="store",
metavar="YYYY-MM-DD HH:mm[:ss]",
    help=Params.parameters["perishdtg"]["help"])
# =====================================================================================
# =====================================================================================
# Acknowledge Request Group Arguments
ack_options = parser.add_argument_group(
"Acknowledgement Request Group", "Options to request acknowledgement and replies.")
ack_options.add_argument("--ack-machine",
dest=Params.parameters["ackmachine"]["cmd"],
action="store_true",
help=Params.parameters["ackmachine"]["help"])
ack_options.add_argument("--ack-op",
dest=Params.parameters["ackop"]["cmd"],
action="store_true",
help=Params.parameters["ackop"]["help"])
ack_options.add_argument("--reply",
dest=Params.parameters["reply"]["cmd"],
action="store_true",
help=Params.parameters["reply"]["help"])
# =====================================================================================
# =====================================================================================
# Response Data Group Arguments
#
resp_options = parser.add_argument_group(
"Response Data Options", "Fields for the response data group.")
resp_options.add_argument("--ack-dtg",
dest=Params.parameters["ackdtg"]["cmd"],
help=Params.parameters["ackdtg"]["help"],
action="store",
metavar="YYYY-MM-DD HH:mm[:ss] [extension]")
resp_options.add_argument("--rc",
dest=Params.parameters["rc"]["cmd"],
help=Params.parameters["rc"]["help"],
choices=Params.parameters["rc"]["choices"],
action="store")
resp_options.add_argument("--cantpro",
dest=Params.parameters["cantpro"]["cmd"],
help=Params.parameters["cantpro"]["help"],
action="store",
type=int,
metavar="1-32")
resp_options.add_argument("--cantco",
dest=Params.parameters["cantco"]["cmd"],
help=Params.parameters["cantco"]["help"],
choices=Params.parameters["cantco"]["choices"],
action="store")
resp_options.add_argument("--reply-amp",
dest=Params.parameters["replyamp"]["cmd"],
help=Params.parameters["replyamp"]["help"],
action="store")
# =====================================================================================
# =====================================================================================
# Reference Message Data Group Arguments
#
ref_msg_options = parser.add_argument_group(
"Reference Message Data Group", "Fields of the reference message data group.")
ref_msg_options.add_argument("--ref-urn",
dest=Params.parameters["ref_urn"]["cmd"],
help=Params.parameters["ref_urn"]["help"],
metavar="URN",
action="store")
ref_msg_options.add_argument("--ref-unit",
dest=Params.parameters["ref_unitname"]["cmd"],
help=Params.parameters["ref_unitname"]["help"],
metavar="STRING",
action="store")
ref_msg_options.add_argument("--ref-dtg",
dest=Params.parameters["refdtg"]["cmd"],
help=Params.parameters["refdtg"]["help"],
action="store",
metavar="YYYY-MM-DD HH:mm[:ss] [extension]")
# =====================================================================================
# =====================================================================================
# Message Security Data Group Arguments
#
msg_sec_grp = parser.add_argument_group(
"Message Security Group", "Fields of the message security group.")
msg_sec_grp.add_argument("--sec-param",
dest=Params.parameters["secparam"]["cmd"],
help=Params.parameters["secparam"]["help"],
choices=Params.parameters["secparam"]["choices"],
action="store")
msg_sec_grp.add_argument("--keymat-len",
dest=Params.parameters["keymatlen"]["cmd"],
help=Params.parameters["keymatlen"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--keymat-id",
dest=Params.parameters["keymatid"]["cmd"],
help=Params.parameters["keymatid"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--crypto-init-len",
dest=Params.parameters["crypto_init_len"]["cmd"],
help=Params.parameters["crypto_init_len"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--crypto-init",
dest=Params.parameters["crypto_init"]["cmd"],
help=Params.parameters["crypto_init"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--keytok-len",
dest=Params.parameters["keytok_len"]["cmd"],
help=Params.parameters["keytok_len"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--keytok",
dest=Params.parameters["keytok"]["cmd"],
help=Params.parameters["keytok"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--autha-len",
dest=Params.parameters["autha-len"]["cmd"],
help=Params.parameters["autha-len"]["help"],
action="store",
type=int,
metavar="LENGTH")
msg_sec_grp.add_argument("--authb-len",
dest=Params.parameters["authb-len"]["cmd"],
help=Params.parameters["authb-len"]["help"],
action="store",
type=int,
metavar="LENGTH")
msg_sec_grp.add_argument("--autha",
dest=Params.parameters["autha"]["cmd"],
help=Params.parameters["autha"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--authb",
dest=Params.parameters["authb"]["cmd"],
help=Params.parameters["authb"]["help"],
action="store",
type=int)
msg_sec_grp.add_argument("--ack-signed",
dest=Params.parameters["acksigned"]["cmd"],
help=Params.parameters["acksigned"]["help"],
action="store_true")
msg_sec_grp.add_argument("--pad-len",
dest=Params.parameters["pad_len"]["cmd"],
help=Params.parameters["pad_len"]["help"],
action="store",
type=int,
metavar="LENGTH")
msg_sec_grp.add_argument("--padding",
dest=Params.parameters["padding"]["cmd"],
help=Params.parameters["padding"]["help"],
action="store",
type=int)
# =============================================================================
#//////////////////////////////////////////////////////////////////////////////
class VmfShell(object):
    """
    Interactive shell for Vmfcat. The shell can be used to build a VMF message.
    """
CMD_SAVE = 'save'
CMD_LOAD = 'load'
CMD_SEARCH = 'search'
CMD_SET = 'set'
CMD_SHOW = 'show'
CMD_HEADER = 'header'
CMD_HELP = 'help'
CMD_QUIT = 'quit'
PROMPT = "<<< "
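    # A typical session might look like this (illustrative, using the
    # commands defined below):
    #   <<< set fad airops
    #   <<< show fad
    #   <<< header vmfversion bin
    #   <<< save message.json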
def __init__(self, _output=sys.stdout):
"""
Initializes the user interface by defining a Logger object
and defining the standard output.
"""
self.output = _output
self.logger = Logger(_output, _debug=True)
def start(self):
"""
Starts the main loop of the interactive shell.
"""
# Command entered by the user
cmd = ""
self.logger.print_info("Type 'help' to show a list of available commands.")
while (cmd.lower() != VmfShell.CMD_QUIT):
try:
self.output.write(VmfShell.PROMPT)
user_input = sys.stdin.readline()
tokens = user_input.rstrip().split()
cmd = tokens[0]
if (cmd.lower() == VmfShell.CMD_QUIT):
pass
elif (cmd.lower() == VmfShell.CMD_HELP):
if (len(tokens) == 1):
self.logger.print_info("{:s} <field>|all".format(VmfShell.CMD_SHOW))
self.logger.print_info("{:s} <field> <value>".format(VmfShell.CMD_SET))
self.logger.print_info("{:s} [field] {{bin, hex}}".format(VmfShell.CMD_HEADER))
self.logger.print_info("{:s} <field>".format(VmfShell.CMD_HELP))
self.logger.print_info("{:s} <field>".format(VmfShell.CMD_SEARCH))
self.logger.print_info("{:s} <file>".format(VmfShell.CMD_SAVE))
self.logger.print_info("{:s} <file>".format(VmfShell.CMD_LOAD))
self.logger.print_info("{:s}".format(VmfShell.CMD_QUIT))
else:
param = tokens[1]
                        if (param in Params.parameters.keys()):
help_msg = Params.parameters[param]['help']
self.logger.print_info(help_msg)
if (len(Params.parameters[param]['choices']) > 0):
                                choices_msg = ', '.join([str(choice) for choice in Params.parameters[param]['choices']])
self.logger.print_info("Available values: {:s}".format(choices_msg))
else:
self.logger.print_error("Unknown parameter/option: {:s}.".format(param))
elif (cmd.lower() == VmfShell.CMD_SHOW):
#
# Displays the value of the given field
#
if (len(tokens) == 2):
param = tokens[1]
if (param in Params.parameters.keys()):
                            value = Params.__dict__.get(param)
if (isinstance(value, int)):
value = "0x{:02x}".format(value)
self.logger.print_info("{} = {}".format(param, value))
elif param.lower() == "all":
for p in Params.parameters.keys():
                                value = Params.__dict__.get(p)
self.logger.print_info("{} = {}".format(p, value))
else:
self.logger.print_error("Unknown parameter/option {:s}.".format(param))
else:
                        self.logger.print_error("Usage: {:s} <field>".format(VmfShell.CMD_SHOW))
elif (cmd.lower() == VmfShell.CMD_SET):
#
# Sets a field with the given value
#
# TODO: Issues with parameters with boolean values
if (len(tokens) >= 3):
param = tokens[1]
value = ' '.join(tokens[2:])
                        if (param in Params.parameters.keys()):
if (Params.parameters[param]["choices"]):
if (value in Params.parameters[param]["choices"]):
Params.__dict__[param] = value
new_value = Params.__dict__[param]
self.logger.print_success("{:s} = {:s}".format(param, new_value))
else:
self.logger.print_error("Invalid value ({:s}) for field {:s}.".format(value, param))
                                    self.logger.print_info("Values for this field are: {:s}.".format(', '.join([str(c) for c in Params.parameters[param]["choices"]])))
else:
Params.__dict__[param] = value
new_value = Params.__dict__[param]
self.logger.print_success("{:s} = {:s}".format(param, new_value))
else:
self.logger.print_error("Unknown parameter {:s}.".format(param))
else:
self.logger.print_error("Usage: {:s} <field> <value>".format(VmfShell.CMD_SET))
elif (cmd.lower() == VmfShell.CMD_HEADER):
field = "vmfversion"
fmt = "bin"
if (len(tokens) >= 2):
field = tokens[1]
if (len(tokens) == 3):
fmt = tokens[2]
vmf_factory = Factory(_logger=self.logger)
vmf_message = vmf_factory.new_message(Params)
vmf_elem = vmf_message.header.elements[field]
if (isinstance(vmf_elem, Field)):
vmf_value = vmf_elem.value
elif (isinstance(vmf_elem, Group)):
vmf_value = "n/a"
else:
raise Exception("Unknown type for element '{:s}'.".format(field))
vmf_bits = vmf_elem.get_bit_array()
output = vmf_bits
if (fmt == "bin"):
output = vmf_bits.bin
if (fmt == "hex"):
output = vmf_bits.hex
self.logger.print_success("{}\t{}\t{}".format(field, vmf_value, output))
elif (cmd.lower() == VmfShell.CMD_SEARCH):
keyword = ' '.join(tokens[1:]).lower()
for p in Params.parameters.keys():
help = Params.parameters[p]['help']
if (p.lower() == keyword or keyword in help.lower()):
self.logger.print_success("{:s}: {:s}".format(p, help))
elif (cmd.lower() == VmfShell.CMD_SAVE):
if len(tokens) == 2:
file = tokens[1]
tmpdict = {}
for param in Params.parameters.keys():
                            value = Params.__dict__.get(param)
tmpdict[param] = value
with open(file, 'w') as f:
json.dump(tmpdict, f)
self.logger.print_success("Saved VMF message to {:s}.".format(file))
else:
self.logger.print_error("Specify a file to save the configuration to.")
elif (cmd.lower() == "test"):
if (len(tokens) == 2):
vmf_params = tokens[1]
else:
vmf_params = '0x4023'
                    bstream = BitStream(vmf_params)
vmf_factory = Factory(_logger=self.logger)
vmf_message = vmf_factory.read_message(bstream)
elif (cmd.lower() == VmfShell.CMD_LOAD):
if len(tokens) == 2:
file = tokens[1]
with open(file, 'r') as f:
param_dict = json.load(f)
for (param, value) in param_dict.iteritems():
Params.__dict__[param] = value
self.logger.print_success("Loaded VMF message from {:s}.".format(file))
else:
self.logger.print_error("Specify a file to load the configuration from.")
else:
self.logger.print_error("Unknown command {:s}.".format(cmd))
except Exception as e:
                self.logger.print_error("An exception has occurred: {:s}".format(e.message))
traceback.print_exc(file=sys.stdout)
| InfectedPacket/TerrorCat | UI.py | Python | lgpl-3.0 | 32,143 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Plot cost evolution
@author: verlaanm
"""
# ex1
#load numpy and matplotlib if needed
import matplotlib.pyplot as plt
#load data
import dud_results as dud
# create plot of cost and parameter
plt.close("all")
f,ax = plt.subplots(2,1)
ax[0].plot(dud.costTotal);
ax[0].set_xlabel("model run");
ax[0].set_ylabel("cost function");
ax[1].plot(dud.evaluatedParameters);
ax[1].set_xlabel('model run');
ax[1].set_ylabel('change of reaction\_time [seconds]');
| OpenDA-Association/OpenDA | course/exercise_black_box_calibration_polution_NOT_WORKING/plot_cost.py | Python | lgpl-3.0 | 504 |
#!/usr/bin/env python
from string import punctuation
import argparse
import fnmatch
import os
import shutil
import sys
import json
sys.path.insert(1, '/var/www/bedrock/')
sys.path.insert(0, '/var/www/bedrock/src/')
import analytics.utils
import dataloader.utils
import visualization.utils
from multiprocessing import Queue
#function to determine if the file being checked has the appropriate imports
def find_imports(fileToCheck, desiredInterface):
importedList = []
asterikFound = False
with open(fileToCheck, 'r') as pyFile:
for line in pyFile:
            newFront = line.find("import") #finds the first occurrence of the word import on the line
if newFront != -1: #an occurence of import has been found
line = line[newFront + 7:] #sets line to now start at the word after import
                possibleAs = line.find(" as") #used to find import statements that have the structure (from x import y as z)
if possibleAs != -1:
line = line[possibleAs + 4:]
if line.find("*") != -1 and len(line) == 2: #if the import is just the *
importedList.extend(line)
asterikFound = True
            line = [word.strip(punctuation) for word in line.split()] #correctly splits the inputs based on punctuation
importedList.extend(line) #creates a single list of all the imports
if desiredInterface == 1:
if "Algorithm" not in importedList:
return "Missing the Algorithm input, can be fixed using 'from ..analytics import Algorithm'.\n"
else:
return ""
elif desiredInterface == 2:
if "*" not in importedList:
return "Missing the * input, can be fixed using 'from visualization.utils import *'.\n"
else:
return ""
elif desiredInterface == 3:
if "*" not in importedList:
return "Missing the * input, can be fixed using 'from dataloader.utils import *'.\n"
else:
return ""
elif desiredInterface == 4:
if "*" not in importedList:
return "Missing the * input, can be fixed using 'from dataloader.utils import *'\n"
else:
return ""
def find_class(fileToCheck, desiredInterface):
classesList = []
with open(fileToCheck, 'r') as pyFile:
for line in pyFile:
newFront = line.find("class")
newEnd = line.find(")")
if newFront == 0:
line = line[newFront + 6: newEnd + 1]
line.split()
classesList.append(line)
return classesList
def find_functions(fileToCheck, desiredInterface):
functionsList_with_inputs = []
with open(fileToCheck, 'r') as pyFile:
for line in pyFile:
newFront = line.find("def")
newEnd = line.find(")")
if newFront != -1:
line = line[newFront + 3: newEnd + 1]
line.split()
functionsList_with_inputs.append(line)
return functionsList_with_inputs
def compare_functions(fileToCheck, desiredInterface):
fList = find_functions(fileToCheck, desiredInterface)
if desiredInterface == 1:
if " __init__(self)" not in fList or " compute(self, filepath, **kwargs)" not in fList:
return "Function/s and/or specific input/s missing in this file.\n"
else:
return ""
elif desiredInterface == 2:
if " __init__(self)" not in fList or " initialize(self, inputs)" not in fList or " create(self)" not in fList:
return "Function/s and/or specific inputs/s missing in this file.\n"
else:
return ""
elif desiredInterface == 3:
if " __init__(self)" not in fList or " explore(self, filepath)" not in fList or " ingest(self, posted_data, src)" not in fList:
return "Function/s and/or specific input/s missing in this file.\n"
else:
return ""
elif desiredInterface == 4:
if " __init__(self)" not in fList or (" check(self, name, sample)" not in fList and " check(self, name, col)" not in fList) or " apply(self, conf)" not in fList:
return "Function/s and/or specific input/s missing in this file.\n"
else:
return ""
def inheritance_check(fileToCheck, desiredInterface):
class_name_list = find_class(fileToCheck, desiredInterface)
inhertiance_name = ""
if (len(class_name_list) > 0):
if (desiredInterface == 1 and len(class_name_list) > 1):
inhertiance_name = class_name_list[0]
elif (len(class_name_list) > 1):
inhertiance_name = class_name_list[len(class_name_list) - 1]
else:
inhertiance_name = class_name_list[0]
newFront = inhertiance_name.find("(")
newEnd = inhertiance_name.find(")")
inhertiance_name = inhertiance_name[newFront + 1:newEnd]
if desiredInterface == 1:
if inhertiance_name != "Algorithm":
return "Class must inherit from the Algorithm super class.\n"
else:
return ""
elif desiredInterface == 2:
if inhertiance_name != "Visualization":
return "Class must inherit from the Visualization super class.\n"
else:
return ""
elif desiredInterface == 3:
if inhertiance_name != "Ingest":
return "Class must inherit from the Ingest super class.\n"
else:
return ""
elif desiredInterface == 4:
if inhertiance_name != "Filter":
return "Class must inherit from the Filter super class.\n"
else:
return ""
else:
return "There are no classes in this file.\n"
def validate_file_name(fileToCheck, desiredInterface):
class_name_list = find_class(fileToCheck, desiredInterface)
class_name = ""
if (len(class_name_list) > 0):
if (desiredInterface == 1 and len(class_name_list) > 1):
class_name = class_name_list[0]
elif (len(class_name_list) > 1):
class_name = class_name_list[len(class_name_list) - 1]
else:
class_name = class_name_list[0]
trim = class_name.find("(")
class_name = class_name[:trim]
    superStatement = []
    with open(fileToCheck, 'r') as pyFile:
        for line in pyFile:
            newFront = line.find("super")
            if newFront != -1:
                trimFront = line.find("(")
                trimBack = line.find(",")
                line = line[trimFront + 1: trimBack]
                superStatement.append(line)
    if class_name not in superStatement:
        return "Class name does not match the name passed to super()\n"
else:
return ""
def list_returns(fileToCheck, desiredInterface):
returnsList = []
newLine = ""
with open(fileToCheck, 'r') as pyFile:
for line in pyFile:
if line.find("#") == -1:
newFront = line.find("return")
if newFront != -1:
possibleErrorMessageCheck1 = line.find("'")
bracketBefore = line.find("{")
lastBracket = line.find("}")
newLine = line[possibleErrorMessageCheck1:]
possibleErrorMessageCheck2 = newLine.find(" ")
if possibleErrorMessageCheck2 == -1:
line = line[newFront + 7:]
line.split()
line = [word.strip(punctuation) for word in line.split()]
returnsList.extend(line)
elif possibleErrorMessageCheck1 == bracketBefore + 1:
line = line[newFront + 7:lastBracket + 1]
line.split()
returnsList.append(line)
return returnsList
def check_return_values(fileToCheck, desiredInterface):
listOfReturns = list_returns(fileToCheck, desiredInterface)
listOfClasses = find_class(fileToCheck, desiredInterface)
firstElement = listOfClasses[0]
for elem in listOfClasses:
cutOff = elem.find("(")
if cutOff != -1:
elem = elem[:cutOff]
firstElement = elem
listOfClasses[0] = firstElement
if desiredInterface == 1:
listOfFunctions = find_functions(fileToCheck, desiredInterface)
if len(listOfFunctions) == 2 and len(listOfReturns) > 0:
return "Too many return values in this file.\n"
else:
return ""
elif desiredInterface == 2:
if len(listOfReturns) > 1:
if listOfClasses[0] not in listOfReturns or listOfReturns[1].find("data") == -1 or listOfReturns[1].find("type") == -1 or listOfReturns[1].find("id") == -1:
return "Missing or incorrectly named return values.\n"
else:
return ""
elif listOfReturns[0].find("data") == -1 or listOfReturns[0].find("type") == -1 or listOfReturns[0].find("id") == -1:
return "Missing or incorrectly named return values.\n"
else:
return ""
elif desiredInterface == 3:
if ("schema" not in listOfReturns and "schemas" not in listOfReturns and "collection:ret" not in listOfReturns) or "error" not in listOfReturns or "matrices" not in listOfReturns:
return "Missing or incorrectly named return values.\n"
else:
return ""
elif desiredInterface == 4:
if ("True" not in listOfReturns and "False" not in listOfReturns) or ("matrix" not in listOfReturns and "None" not in listOfReturns):
return "Missing or incorrectly named return values"
else:
return ""
def hard_type_check_return(fileToCheck, desiredInterface, my_dir, output_directory, filter_specs):
specificErrorMessage = ""
queue = Queue()
lastOccurence = fileToCheck.rfind("/")
file_name = fileToCheck[lastOccurence + 1:len(fileToCheck) - 3]
    print(filter_specs)
if desiredInterface == 1:
file_metaData = analytics.utils.get_metadata(file_name)
elif desiredInterface == 2:
file_metaData = visualization.utils.get_metadata(file_name)
elif desiredInterface == 3:
file_metaData = dataloader.utils.get_metadata(file_name, "ingest")
elif desiredInterface == 4:
file_metaData = dataloader.utils.get_metadata(file_name, "filters")
inputList = []
if desiredInterface != 3 and desiredInterface != 4:
for elem in file_metaData['inputs']:
inputList.append(elem)
inputDict = create_input_dict(my_dir, inputList)
if desiredInterface == 1:
count = 0
computeResult = analytics.utils.run_analysis(queue, file_name, file_metaData['parameters'], inputDict, output_directory, "Result")
for file in os.listdir(my_dir):
if fnmatch.fnmatch(file, "*.csv") or fnmatch.fnmatch(file, ".json"):
count += 1
if (count < 1):
specificErrorMessage += "Missing .csv or .json file, the compute function must create a new .csv or .json file."
for file_name in os.listdir(output_directory):
os.remove(os.path.join(output_directory, file_name))
elif desiredInterface == 2:
createResult = visualization.utils.generate_vis(file_name, inputDict, file_metaData['parameters'])
if (type(createResult) != dict):
specificErrorMessage += "Missing a dict return, create function must return a dict item."
elif desiredInterface == 3:
filter_specs_dict = json.loads(str(filter_specs))
exploreResult = dataloader.utils.explore(file_name, my_dir, [])
exploreResultList = list(exploreResult)
count = 0
typeOfMatrix = []
matrix = ""
nameOfSource = ""
filterOfMatrix = []
for elem in exploreResult:
if type(elem) == dict:
for key in elem.keys():
nameOfSource = str(key)
if len(elem.values()) == 1:
for value in elem.values():
while count < len(value):
for item in value[count].keys():
if item == "type":
matrix = str(value[count]['type'])
matrix = matrix[2:len(matrix) - 2]
typeOfMatrix.append(matrix)
if item == "key_usr":
filterOfMatrix.append(str(value[count]['key_usr']))
count += 1
typeListExplore = []
posted_data = {
'matrixFilters':{},
'matrixFeatures':[],
'matrixFeaturesOriginal':[],
'matrixName':"test",
'sourceName':nameOfSource,
'matrixTypes':[]
}
# posted_data['matrixFilters'].update({filterOfMatrix[0]:{"classname":"DocumentLEAN","filter_id":"DocumentLEAN","parameters":[],"stage":"before","type":"extract"}}) #for Text
# posted_data['matrixFilters'].update({filterOfMatrix[0]:{"classname":"TweetDocumentLEAN","filter_id":"TweetDocumentLEAN","parameters":[{"attrname":"include","name":"Include the following keywords","type":"input","value":""},{"attrname":"sent","value":"No"},{"attrname":"exclude","name":"Exclude the following keywords","type":"input","value":""},{"attrname":"lang","name":"Language","type":"input","value":""},{"attrname":"limit","name":"Limit","type":"input","value":"10"},{"attrname":"start","name":"Start time","type":"input","value":""},{"attrname":"end","name":"End time","type":"input","value":""},{"attrname":"geo","name":"Geo","type":"input","value":""}],"stage":"before","type":"extract"}}) #for Mongo
posted_data['matrixFilters'].update({filterOfMatrix[0]:filter_specs_dict})
# posted_data['matrixFilters'].update({filterOfMatrix[0]:{}}) #for spreadsheet
posted_data['matrixFeatures'].append(filterOfMatrix[0])
posted_data['matrixFeaturesOriginal'].append(filterOfMatrix[0])
posted_data['matrixTypes'].append(typeOfMatrix[0])
secondToLastOccurence = my_dir.rfind("/", 0, my_dir.rfind("/"))
my_dir = my_dir[:secondToLastOccurence + 1]
src = {
'created':dataloader.utils.getCurrentTime(),
'host': "127.0.1.1",
'ingest_id':file_name,
'matrices':[],
'name': nameOfSource,
'rootdir':my_dir,
'src_id': "test_files",
'src_type':"file"
}
ingestResult = dataloader.utils.ingest(posted_data, src)
ingestResultList = list(ingestResult)
typeListIngest = []
for i in range(len(exploreResultList)):
typeListExplore.append(type(exploreResultList[i]))
for i in range(len(ingestResultList)):
typeListIngest.append(type(ingestResultList[i]))
for file in os.listdir(my_dir):
if os.path.isdir(my_dir + file) and len(file) > 15:
shutil.rmtree(my_dir + file + "/")
if file.startswith("reduced_"):
os.remove(os.path.join(my_dir, file))
if dict in typeListExplore and int not in typeListExplore:
specificErrorMessage += "Missing a int, explore function must return both a dict and a int."
elif dict not in typeListExplore and int in typeListExplore:
specificErrorMessage += "Missing a dict, explore function must return both a dict and a int."
elif dict not in typeListExplore and int not in typeListExplore:
specificErrorMessage += "Missing a dict and int, explore function must return both a dict and a int."
if bool in typeListIngest and list not in typeListIngest:
specificErrorMessage += " Missing a list, ingest function must return both a boolean and a list."
elif bool not in typeListIngest and list in typeListIngest:
specificErrorMessage += " Missing a boolean value, ingest function must return both a boolean and a list."
elif bool not in typeListIngest and list not in typeListIngest:
specificErrorMessage += " Missing a boolean value and list, ingest function must return both a boolean and a list."
elif desiredInterface == 4:
conf = {
'mat_id':'27651d66d4cf4375a75208d3482476ac',
'storepath':'/home/vagrant/bedrock/bedrock-core/caa1a3105a22477f8f9b4a3124cd41b6/source/',
'src_id':'caa1a3105a22477f8f9b4a3124cd41b6',
'name':'iris'
}
checkResult = dataloader.utils.check(file_name, file_metaData['name'], conf)
if type(checkResult) != bool:
specificErrorMessage += "Missing boolean value, check funtion must return a boolean value."
applyResult = dataloader.utils.apply(file_name, file_metaData['parameters'], conf)
if type(applyResult) != dict:
specificErrorMessage += " Missing a dict object, apply function must return a dict object."
return specificErrorMessage
def create_input_dict(my_dir, inputList):
returnDict = {}
i = 0
j = 1
length = len(inputList)
for file in os.listdir(my_dir):
if file in inputList:
if length == 1 or (length > 1 and file != inputList[length - 1]) or (length > 1 and inputList[i] != inputList[i + 1]):
returnDict.update({file:{'rootdir':my_dir}})
elif length > 1 and inputList[i] == inputList[i + 1]:
firstNewFile = file + "_" + str(j)
j += 1
returnDict.update({firstNewFile:{'rootdir':my_dir}})
if j > 1:
secondNewFile = file + "_" + str(j)
returnDict.update({secondNewFile:{'rootdir':my_dir}})
i += 1
length -= 1
return returnDict
parser = argparse.ArgumentParser(description="Validate files being added to system.")
parser.add_argument('--api', help="The API where the file is trying to be inserted.", action='store', required=True, metavar='api')
parser.add_argument('--filename', help="Name of file including entire file path.", action='store', required=True, metavar='filename')
parser.add_argument('--input_directory', help="Directory where necessary inputs are stored", action='store', required=True, metavar='input_directory')
parser.add_argument('--filter_specs', help="Specifications for a used filter.", action='store', required=True, metavar='filter_specs')
parser.add_argument('--output_directory', help='Directory where outputs are stored (type NA if there will be no outputs).', action='store', required=True, metavar='output_directory')
args = parser.parse_args()
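# Example invocation (hypothetical paths):
#   python validationScript.py --api analytics \
#       --filename /var/www/bedrock/analytics/MyAnalytic.py \
#       --input_directory /tmp/inputs/ --filter_specs '{}' \
#       --output_directory /tmp/outputs/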
desiredInterface = 0
fileToCheck = args.filename
if args.api.lower() == "analytics":
desiredInterface = 1
elif args.api.lower() == "visualization":
desiredInterface = 2
elif args.api.lower() == "ingest":
desiredInterface = 3
elif args.api.lower() == "filter":
desiredInterface = 4
my_dir = args.input_directory
output_directory = args.output_directory
filter_specs = args.filter_specs
errorMessage = ""
errorMessage += str(find_imports(fileToCheck, desiredInterface))
errorMessage += str(compare_functions(fileToCheck, desiredInterface))
errorMessage += str(inheritance_check(fileToCheck, desiredInterface))
errorMessage += str(validate_file_name(fileToCheck, desiredInterface))
errorMessage += str(check_return_values(fileToCheck, desiredInterface))
if len(errorMessage) == 0:
print("File has been validated and is ready for input")
else:
print("Error Log: ")
print(errorMessage)
print(hard_type_check_return(fileToCheck, desiredInterface, my_dir, output_directory, filter_specs))
| Bedrock-py/bedrock-core | validation/validationScript.py | Python | lgpl-3.0 | 17,191 |
import math,re,sys,os,time
import random as RD
import time
try:
    import netCDF4 as NC
except:
    print("netCDF4 for Python is not installed")
    print("Skipping the netCDF4 import")
try:
    import numpy as NP
except:
    print("numpy is not installed")
    print("Skipping the numpy import")
class GRIDINFORMATER:
"""
This object is the information of the input gridcells/array/map.
Using
.add_an_element to add an element/gridcell
.add_an_geo_element to add an element/gridcell
.create_resample_lat_lon to create a new map of lat and lon for resampling
.create_resample_map to create resample map as ARR_RESAMPLE_MAP
.create_reference_map to create ARR_REFERENCE_MAP to resample target map.
.export_reference_map to export ARR_REFERENCE_MAP into netCDF4 format
"""
STR_VALUE_INIT = "None"
NUM_VALUE_INIT = -9999.9
NUM_NULL = float("NaN")
ARR_RESAMPLE_X_LIM = []
ARR_RESAMPLE_Y_LIM = []
# FROM WRF: module_cam_shr_const_mod.f90
NUM_CONST_EARTH_R = 6.37122E6
NUM_CONST_PI = 3.14159265358979323846
def __init__(self, name="GRID", ARR_LAT=[], ARR_LON=[], NUM_NT=1, DIMENSIONS=2 ):
self.STR_NAME = name
self.NUM_DIMENSIONS = DIMENSIONS
self.NUM_LAST_INDEX = -1
self.ARR_GRID = []
self.NUM_NT = NUM_NT
self.ARR_LAT = ARR_LAT
self.ARR_LON = ARR_LON
self.ARR_RESAMPLE_MAP_PARA = { "EDGE": {"N" :-999, "S":-999, "E":-999, "W":-999 } }
if len(ARR_LAT) != 0 and len(ARR_LON) != 0:
NUM_ARR_NY_T1 = len(ARR_LAT)
NUM_ARR_NY_T2 = len(ARR_LON)
NUM_ARR_NX_T1 = len(ARR_LAT[0])
NUM_ARR_NX_T2 = len(ARR_LON[0])
self.NUM_NX = NUM_ARR_NX_T1
self.NUM_NY = NUM_ARR_NY_T1
        if NUM_ARR_NY_T1 != NUM_ARR_NY_T2 or NUM_ARR_NX_T1 != NUM_ARR_NX_T2:
            print("The shapes of LAT ({0:d}x{1:d}) and LON ({2:d}x{3:d}) do not match"\
                .format(NUM_ARR_NY_T1,NUM_ARR_NX_T1,NUM_ARR_NY_T2,NUM_ARR_NX_T2))
def index_map(self, ARR_IN=[], NUM_IN_NX=0, NUM_IN_NY=0):
if len(ARR_IN) == 0:
self.INDEX_MAP = [[ self.NUM_NULL for i in range(self.NUM_NX)] for j in range(self.NUM_NY)]
NUM_ALL_INDEX = len(self.ARR_GRID)
for n in range(NUM_ALL_INDEX):
self.INDEX_MAP[self.ARR_GRID[n]["INDEX_J"]][self.ARR_GRID[n]["INDEX_I"]] =\
self.ARR_GRID[n]["INDEX"]
else:
MAP_INDEX = [[ self.NUM_NULL for i in range(NUM_IN_NX)] for j in range(NUM_IN_NY)]
NUM_ALL_INDEX = len(ARR_IN)
for n in range(NUM_ALL_INDEX):
MAP_INDEX[ARR_IN[n]["INDEX_J"]][ARR_IN[n]["INDEX_I"]] = ARR_IN[n]["INDEX"]
return MAP_INDEX
def add_an_element(self, ARR_GRID, NUM_INDEX=0, STR_VALUE=STR_VALUE_INIT, NUM_VALUE=NUM_VALUE_INIT ):
""" Adding an element to an empty array """
OBJ_ELEMENT = {"INDEX" : NUM_INDEX, \
STR_VALUE : NUM_VALUE}
ARR_GRID.append(OBJ_ELEMENT)
def add_an_geo_element(self, ARR_GRID, NUM_INDEX=-999, NUM_J=0, NUM_I=0, \
NUM_NX = 0, NUM_NY = 0, NUM_NT=0, \
ARR_VALUE_STR=[], ARR_VALUE_NUM=[] ):
""" Adding an geological element to an empty array
The information for lat and lon of center, edge, and vertex will
be stored for further used.
"""
NUM_NVAR = len(ARR_VALUE_STR)
if NUM_NX == 0 or NUM_NY == 0:
NUM_NX = self.NUM_NX
NUM_NY = self.NUM_NY
if NUM_NT == 0:
NUM_NT = self.NUM_NT
NUM_CENTER_LON = self.ARR_LON[NUM_J][NUM_I]
NUM_CENTER_LAT = self.ARR_LAT[NUM_J][NUM_I]
if NUM_I == 0:
NUM_WE_LON = ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I + 1] ) * 0.5
NUM_EW_LON = -1 * ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I + 1] ) * 0.5
elif NUM_I == NUM_NX - 1:
NUM_WE_LON = -1 * ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I - 1] ) * 0.5
NUM_EW_LON = ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I - 1] ) * 0.5
else:
NUM_WE_LON = ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I + 1] ) * 0.5
NUM_EW_LON = ( self.ARR_LON[NUM_J][NUM_I] - self.ARR_LON[NUM_J][NUM_I - 1] ) * 0.5
if NUM_J == 0:
NUM_SN_LAT = -1 * ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J + 1][NUM_I ] ) * 0.5
NUM_NS_LAT = ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J + 1][NUM_I ] ) * 0.5
elif NUM_J == NUM_NY - 1:
NUM_SN_LAT = ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J - 1][NUM_I ] ) * 0.5
NUM_NS_LAT = -1 * ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J - 1][NUM_I ] ) * 0.5
else:
NUM_SN_LAT = ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J - 1][NUM_I ] ) * 0.5
NUM_NS_LAT = ( self.ARR_LAT[NUM_J][NUM_I] - self.ARR_LAT[NUM_J + 1][NUM_I ] ) * 0.5
ARR_NE = [ NUM_CENTER_LON + NUM_EW_LON , NUM_CENTER_LAT + NUM_NS_LAT ]
ARR_NW = [ NUM_CENTER_LON + NUM_WE_LON , NUM_CENTER_LAT + NUM_NS_LAT ]
ARR_SE = [ NUM_CENTER_LON + NUM_EW_LON , NUM_CENTER_LAT + NUM_SN_LAT ]
ARR_SW = [ NUM_CENTER_LON + NUM_WE_LON , NUM_CENTER_LAT + NUM_SN_LAT ]
if NUM_INDEX == -999:
NUM_INDEX = self.NUM_LAST_INDEX +1
self.NUM_LAST_INDEX += 1
OBJ_ELEMENT = {"INDEX" : NUM_INDEX,\
"INDEX_I" : NUM_I,\
"INDEX_J" : NUM_J,\
"CENTER" : {"LAT" : NUM_CENTER_LAT, "LON" : NUM_CENTER_LON},\
"VERTEX" : {"NE": ARR_NE, "SE": ARR_SE, "SW": ARR_SW, "NW": ARR_NW},\
"EDGE" : {"N": NUM_CENTER_LAT + NUM_NS_LAT,"S": NUM_CENTER_LAT + NUM_SN_LAT,\
"E": NUM_CENTER_LON + NUM_EW_LON,"W": NUM_CENTER_LON + NUM_WE_LON}}
if len(ARR_VALUE_STR) > 0:
for I, VAR in enumerate(ARR_VALUE_STR):
OBJ_ELEMENT[VAR] = [{ "VALUE" : 0.0} for t in range(NUM_NT) ]
if len(ARR_VALUE_NUM) == NUM_NVAR:
for T in range(NUM_NT):
OBJ_ELEMENT[VAR][T]["VALUE"] = ARR_VALUE_NUM[I][T]
ARR_GRID.append(OBJ_ELEMENT)
def add_an_geo_variable(self, ARR_GRID, NUM_INDEX=-999, NUM_J=0, NUM_I=0, NUM_NT=0,\
STR_VALUE=STR_VALUE_INIT, NUM_VALUE=NUM_VALUE_INIT ):
if NUM_INDEX == -999:
NUM_INDEX = self.INDEX_MAP[NUM_J][NUM_I]
if NUM_NT == 0:
NUM_NT = self.NUM_NT
        ARR_GRID[NUM_INDEX][STR_VALUE] = [{"VALUE": NUM_VALUE } for t in range(NUM_NT)]
def create_resample_lat_lon(self, ARR_RANGE_LAT=[0,0],NUM_EDGE_LAT=0,\
ARR_RANGE_LON=[0,0],NUM_EDGE_LON=0 ):
        self.NUM_GRIDS_LON = int(round((ARR_RANGE_LON[1] - ARR_RANGE_LON[0])/NUM_EDGE_LON))
        self.NUM_GRIDS_LAT = int(round((ARR_RANGE_LAT[1] - ARR_RANGE_LAT[0])/NUM_EDGE_LAT))
self.ARR_LAT = [[ 0 for i in range(self.NUM_GRIDS_LON)] for j in range(self.NUM_GRIDS_LAT) ]
self.ARR_LON = [[ 0 for i in range(self.NUM_GRIDS_LON)] for j in range(self.NUM_GRIDS_LAT) ]
        for j in range(self.NUM_GRIDS_LAT):
            for i in range(self.NUM_GRIDS_LON):
                self.ARR_LON[j][i] = ARR_RANGE_LON[0] + NUM_EDGE_LON * i
                self.ARR_LAT[j][i] = ARR_RANGE_LAT[0] + NUM_EDGE_LAT * j
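    # Example (hypothetical values): a 0.5-degree grid over a small box; after
    # the call self.ARR_LAT/self.ARR_LON have NUM_GRIDS_LAT x NUM_GRIDS_LON cells:
    #   G = GRIDINFORMATER(name="RS")
    #   G.create_resample_lat_lon(ARR_RANGE_LAT=[50.0, 52.0], NUM_EDGE_LAT=0.5,
    #                             ARR_RANGE_LON=[ 6.0,  8.0], NUM_EDGE_LON=0.5)
    #   # G.NUM_GRIDS_LAT == 4, G.NUM_GRIDS_LON == 4, G.ARR_LAT[0][0] == 50.0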
def create_reference_map(self, MAP_TARGET, MAP_RESAMPLE, STR_TYPE="FIX", NUM_SHIFT=0.001, IF_PB=False):
"""Must input with OBJ_REFERENCE
WARNING: The edge of gridcells may not be included due to the unfinished algorithm
"""
self.ARR_REFERENCE_MAP = []
if STR_TYPE=="GRIDBYGEO":
NUM_OBJ_G_LEN = len(MAP_TARGET)
for OBJ_G in MAP_TARGET:
NUM_G_COOR = [OBJ_G["CENTER"]["LAT"], OBJ_G["CENTER"]["LON"]]
for OBJ_R in MAP_RESAMPLE:
NUM_CHK_IN_EW = (OBJ_R["EDGE"]["E"] - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] - OBJ_G["CENTER"]["LON"])
NUM_CHK_IN_SN = (OBJ_R["EDGE"]["N"] - OBJ_G["CENTER"]["LAT"]) *\
(OBJ_R["EDGE"]["S"] - OBJ_G["CENTER"]["LAT"])
if NUM_CHK_IN_EW == 0: NUM_CHK_IN_EW = (OBJ_R["EDGE"]["E"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"])
if NUM_CHK_IN_SN == 0: NUM_CHK_IN_SN = (OBJ_R["EDGE"]["E"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"])
if NUM_CHK_IN_EW < 0 and NUM_CHK_IN_SN < 0:
OBJ_ELEMENT = {"INDEX" : OBJ_G["INDEX"],\
"CENTER" : OBJ_G["CENTER"],\
"INDEX_REF" : OBJ_R["INDEX"],\
"INDEX_REF_I" : OBJ_R["INDEX_I"],\
"INDEX_REF_J" : OBJ_R["INDEX_J"],\
"CENTER_REF" : OBJ_R["CENTER"],\
}
self.ARR_REFERENCE_MAP.append(OBJ_ELEMENT)
break
if IF_PB: TOOLS.progress_bar(TOOLS.cal_loop_progress([OBJ_G["INDEX"]], [NUM_OBJ_G_LEN]), STR_DES="CREATING REFERENCE MAP")
elif STR_TYPE=="FIX":
NUM_OBJ_G_LEN = len(MAP_TARGET)
for OBJ_G in MAP_TARGET:
NUM_G_COOR = [OBJ_G["CENTER"]["LAT"], OBJ_G["CENTER"]["LON"]]
if self.ARR_RESAMPLE_MAP_PARA["EDGE"]["W"] == -999 or self.ARR_RESAMPLE_MAP_PARA["EDGE"]["E"] == -999:
NUM_CHK_EW_IN = -1
else:
NUM_CHK_EW_IN = (NUM_G_COOR[1] - self.ARR_RESAMPLE_MAP_PARA["EDGE"]["W"] ) * ( NUM_G_COOR[1] - self.ARR_RESAMPLE_MAP_PARA["EDGE"]["E"] )
if self.ARR_RESAMPLE_MAP_PARA["EDGE"]["N"] == -999 or self.ARR_RESAMPLE_MAP_PARA["EDGE"]["S"] == -999:
NUM_CHK_SN_IN = -1
else:
NUM_CHK_SN_IN = (NUM_G_COOR[0] - self.ARR_RESAMPLE_MAP_PARA["EDGE"]["S"] ) * ( NUM_G_COOR[0] - self.ARR_RESAMPLE_MAP_PARA["EDGE"]["N"] )
if NUM_CHK_EW_IN < 0 and NUM_CHK_SN_IN < 0:
for OBJ_R in MAP_RESAMPLE:
NUM_CHK_IN_EW = (OBJ_R["EDGE"]["E"] - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] - OBJ_G["CENTER"]["LON"])
NUM_CHK_IN_SN = (OBJ_R["EDGE"]["N"] - OBJ_G["CENTER"]["LAT"]) *\
(OBJ_R["EDGE"]["S"] - OBJ_G["CENTER"]["LAT"])
if NUM_CHK_IN_EW == 0: NUM_CHK_IN_EW = (OBJ_R["EDGE"]["E"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"])
if NUM_CHK_IN_SN == 0: NUM_CHK_IN_SN = (OBJ_R["EDGE"]["E"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"]) *\
(OBJ_R["EDGE"]["W"] + NUM_SHIFT - OBJ_G["CENTER"]["LON"])
if NUM_CHK_IN_EW < 0 and NUM_CHK_IN_SN < 0:
OBJ_ELEMENT = {"INDEX" : OBJ_G["INDEX"],\
"INDEX_I" : OBJ_G["INDEX_I"],\
"INDEX_J" : OBJ_G["INDEX_J"],\
"CENTER" : OBJ_G["CENTER"],\
"INDEX_REF" : OBJ_R["INDEX"],\
"INDEX_REF_I" : OBJ_R["INDEX_I"],\
"INDEX_REF_J" : OBJ_R["INDEX_J"],\
"CENTER_REF" : OBJ_R["CENTER"],\
}
self.ARR_REFERENCE_MAP.append(OBJ_ELEMENT)
break
if IF_PB: TOOLS.progress_bar(TOOLS.cal_loop_progress([OBJ_G["INDEX"]], [NUM_OBJ_G_LEN]), STR_DES="CREATING REFERENCE MAP")
def export_grid_map(self, ARR_GRID_IN, STR_DIR, STR_FILENAME, ARR_VAR_STR=[],\
ARR_VAR_ITEM=["MEAN", "MEDIAN", "MIN", "MAX", "P95", "P75", "P25", "P05"],\
NUM_NX=0, NUM_NY=0, NUM_NT=0, STR_TYPE="netCDF4", IF_PB=False ):
TIME_NOW = time.gmtime()
STR_DATE_NOW = "{0:04d}-{1:02d}-{2:02d}".format(TIME_NOW.tm_year, TIME_NOW.tm_mon, TIME_NOW.tm_mday)
STR_TIME_NOW = "{0:04d}:{1:02d}:{2:02d}".format(TIME_NOW.tm_hour, TIME_NOW.tm_min, TIME_NOW.tm_sec)
if NUM_NX==0: NUM_NX = self.NUM_NX
if NUM_NY==0: NUM_NY = self.NUM_NY
if NUM_NT==0: NUM_NT = self.NUM_NT
if STR_TYPE == "netCDF4":
NCDF4_DATA = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, STR_FILENAME), 'w', format="NETCDF4")
# CREATE ATTRIBUTEs:
NCDF4_DATA.description = \
"The grid information in netCDF4"
NCDF4_DATA.history = "Create on {0:s} at {1:s}".format(STR_DATE_NOW, STR_TIME_NOW)
# CREATE DIMENSIONs:
NCDF4_DATA.createDimension("Y" , NUM_NY )
NCDF4_DATA.createDimension("X" , NUM_NX )
NCDF4_DATA.createDimension("Time" , NUM_NT )
NCDF4_DATA.createDimension("Values", None )
# CREATE BASIC VARIABLES:
NCDF4_DATA.createVariable("INDEX", "i4", ("Y", "X"))
NCDF4_DATA.createVariable("INDEX_J", "i4", ("Y", "X"))
NCDF4_DATA.createVariable("INDEX_I", "i4", ("Y", "X"))
NCDF4_DATA.createVariable("CENTER_LON", "f8", ("Y", "X"))
NCDF4_DATA.createVariable("CENTER_LAT", "f8", ("Y", "X"))
# CREATE GROUP for Variables:
for VAR in ARR_VAR_STR:
NCDF4_DATA.createGroup(VAR)
for ITEM in ARR_VAR_ITEM:
if ITEM == "VALUE" :
NCDF4_DATA.groups[VAR].createVariable(ITEM, "f8", ("Time", "Y", "X", "Values"))
else:
NCDF4_DATA.groups[VAR].createVariable(ITEM, "f8", ("Time", "Y", "X"))
# WRITE IN VARIABLE
for V in ["INDEX", "INDEX_J", "INDEX_I"]:
map_in = self.convert_grid2map(ARR_GRID_IN, V, NX=NUM_NX, NY=NUM_NY, NC_TYPE="INT")
for n in range(len(map_in)):
NCDF4_DATA.variables[V][n] = map_in[n]
for V1 in ["CENTER"]:
for V2 in ["LON", "LAT"]:
map_in = self.convert_grid2map(ARR_GRID_IN, V1, V2, NX=NUM_NX, NY=NUM_NY, NC_TYPE="FLOAT")
for n in range(len(map_in)):
NCDF4_DATA.variables["{0:s}_{1:s}".format(V1, V2)][n] = map_in[n]
for V1 in ARR_VAR_STR:
for V2 in ARR_VAR_ITEM:
map_in = self.convert_grid2map(ARR_GRID_IN, V1, V2, NX=NUM_NX, NY=NUM_NY, NT=NUM_NT)
for n in range(len(map_in)):
NCDF4_DATA.groups[V1].variables[V2][n] = map_in[n]
NCDF4_DATA.close()
def export_grid(self, ARR_GRID_IN, STR_DIR, STR_FILENAME, ARR_VAR_STR=[],\
ARR_VAR_ITEM=["VALUE", "MEAN", "MEDIAN", "MIN", "MAX", "P95", "P75", "P25", "P05"],\
NUM_NX=0, NUM_NY=0, NUM_NT=0, STR_TYPE="netCDF4", IF_PB=False ):
TIME_NOW = time.gmtime()
STR_DATE_NOW = "{0:04d}-{1:02d}-{2:02d}".format(TIME_NOW.tm_year, TIME_NOW.tm_mon, TIME_NOW.tm_mday)
STR_TIME_NOW = "{0:04d}:{1:02d}:{2:02d}".format(TIME_NOW.tm_hour, TIME_NOW.tm_min, TIME_NOW.tm_sec)
if NUM_NX==0: NUM_NX = self.NUM_NX
if NUM_NY==0: NUM_NY = self.NUM_NY
if NUM_NT==0: NUM_NT = self.NUM_NT
if STR_TYPE == "netCDF4":
NCDF4_DATA = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, STR_FILENAME), 'w', format="NETCDF4")
# CREATE ATTRIBUTEs:
NCDF4_DATA.description = \
"The grid information in netCDF4"
NCDF4_DATA.history = "Create on {0:s} at {1:s}".format(STR_DATE_NOW, STR_TIME_NOW)
# CREATE DIMENSIONs:
NCDF4_DATA.createDimension("Y" , NUM_NY )
NCDF4_DATA.createDimension("X" , NUM_NX )
NCDF4_DATA.createDimension("Time" , NUM_NT )
NCDF4_DATA.createDimension("Values", None )
# CREATE BASIC VARIABLES:
INDEX = NCDF4_DATA.createVariable("INDEX", "i4", ("Y", "X"))
INDEX_J = NCDF4_DATA.createVariable("INDEX_J", "i4", ("Y", "X"))
INDEX_I = NCDF4_DATA.createVariable("INDEX_I", "i4", ("Y", "X"))
CENTER_LON = NCDF4_DATA.createVariable("CENTER_LON", "f8", ("Y", "X"))
CENTER_LAT = NCDF4_DATA.createVariable("CENTER_LAT", "f8", ("Y", "X"))
# CREATE GROUP for Variables:
for VAR in ARR_VAR_STR:
NCDF4_DATA.createGroup(VAR)
for ITEM in ARR_VAR_ITEM:
if ITEM == "VALUE" :
NCDF4_DATA.groups[VAR].createVariable(ITEM, "f8", ("Time", "Y", "X", "Values"))
else:
NCDF4_DATA.groups[VAR].createVariable(ITEM, "f8", ("Time", "Y", "X"))
# WRITE IN VARIABLE
for IND, OBJ in enumerate(ARR_GRID_IN):
j = OBJ["INDEX_J"]
i = OBJ["INDEX_I"]
INDEX [j,i] = OBJ["INDEX"]
INDEX_J [j,i] = OBJ["INDEX_J"]
INDEX_I [j,i] = OBJ["INDEX_I"]
CENTER_LON [j,i] = OBJ["CENTER"]["LON"]
CENTER_LAT [j,i] = OBJ["CENTER"]["LAT"]
for VAR in ARR_VAR_STR:
for ITEM in ARR_VAR_ITEM:
for T in range(NUM_NT):
NCDF4_DATA.groups[VAR].variables[ITEM][T,j,i] = OBJ[VAR][T][ITEM]
if IF_PB: TOOLS.progress_bar((IND+1)/(NUM_NX*NUM_NY), STR_DES="WRITING PROGRESS")
NCDF4_DATA.close()
def export_reference_map(self, STR_DIR, STR_FILENAME, STR_TYPE="netCDF4", IF_PB=False, IF_PARALLEL=False ):
TIME_NOW = time.gmtime()
self.STR_DATE_NOW = "{0:04d}-{1:02d}-{2:02d}".format(TIME_NOW.tm_year, TIME_NOW.tm_mon, TIME_NOW.tm_mday)
self.STR_TIME_NOW = "{0:02d}:{1:02d}:{2:02d}".format(TIME_NOW.tm_hour, TIME_NOW.tm_min, TIME_NOW.tm_sec)
STR_INPUT_FILENAME = "{0:s}/{1:s}".format(STR_DIR, STR_FILENAME)
if STR_TYPE == "netCDF4":
IF_FILECHK = os.path.exists(STR_INPUT_FILENAME)
if IF_FILECHK:
NCDF4_DATA = NC.Dataset(STR_INPUT_FILENAME, 'a', format="NETCDF4", parallel=IF_PARALLEL)
INDEX = NCDF4_DATA.variables["INDEX" ]
INDEX_J = NCDF4_DATA.variables["INDEX_J" ]
INDEX_I = NCDF4_DATA.variables["INDEX_I" ]
CENTER_LON = NCDF4_DATA.variables["CENTER_LON" ]
CENTER_LAT = NCDF4_DATA.variables["CENTER_LAT" ]
INDEX_REF = NCDF4_DATA.variables["INDEX_REF" ]
INDEX_REF_J = NCDF4_DATA.variables["INDEX_REF_J" ]
INDEX_REF_I = NCDF4_DATA.variables["INDEX_REF_I" ]
CENTER_REF_LON = NCDF4_DATA.variables["CENTER_REF_LON" ]
CENTER_REF_LAT = NCDF4_DATA.variables["CENTER_REF_LAT" ]
else:
NCDF4_DATA = NC.Dataset(STR_INPUT_FILENAME, 'w', format="NETCDF4", parallel=IF_PARALLEL)
# CREATE ATTRIBUTEs:
NCDF4_DATA.description = \
"The netCDF4 version of reference map which contains grid information for resampling"
NCDF4_DATA.history = "Create on {0:s} at {1:s}".format(self.STR_DATE_NOW, self.STR_TIME_NOW)
# CREATE DIMENSIONs:
NCDF4_DATA.createDimension("Y",self.NUM_NY)
NCDF4_DATA.createDimension("X",self.NUM_NX)
# CREATE_VARIABLES:
INDEX = NCDF4_DATA.createVariable("INDEX", "i4", ("Y", "X"))
INDEX_J = NCDF4_DATA.createVariable("INDEX_J", "i4", ("Y", "X"))
INDEX_I = NCDF4_DATA.createVariable("INDEX_I", "i4", ("Y", "X"))
CENTER_LON = NCDF4_DATA.createVariable("CENTER_LON", "f8", ("Y", "X"))
CENTER_LAT = NCDF4_DATA.createVariable("CENTER_LAT", "f8", ("Y", "X"))
INDEX_REF = NCDF4_DATA.createVariable("INDEX_REF", "i4", ("Y", "X"))
INDEX_REF_J = NCDF4_DATA.createVariable("INDEX_REF_J", "i4", ("Y", "X"))
INDEX_REF_I = NCDF4_DATA.createVariable("INDEX_REF_I", "i4", ("Y", "X"))
CENTER_REF_LON = NCDF4_DATA.createVariable("CENTER_REF_LON", "f8", ("Y", "X"))
CENTER_REF_LAT = NCDF4_DATA.createVariable("CENTER_REF_LAT", "f8", ("Y", "X"))
NUM_TOTAL_OBJ = len(self.ARR_REFERENCE_MAP)
NUM_MAX_I = self.NUM_NX
for OBJ in self.ARR_REFERENCE_MAP:
j = OBJ["INDEX_J"]
i = OBJ["INDEX_I"]
INDEX[j,i] = OBJ["INDEX"]
INDEX_J[j,i] = OBJ["INDEX_J"]
INDEX_I[j,i] = OBJ["INDEX_I"]
INDEX_REF[j,i] = OBJ["INDEX_REF"]
INDEX_REF_J[j,i] = OBJ["INDEX_REF_J"]
INDEX_REF_I[j,i] = OBJ["INDEX_REF_I"]
CENTER_LON [j,i] = OBJ["CENTER"]["LON"]
CENTER_LAT [j,i] = OBJ["CENTER"]["LAT"]
CENTER_REF_LON [j,i] = OBJ["CENTER_REF"]["LON"]
CENTER_REF_LAT [j,i] = OBJ["CENTER_REF"]["LAT"]
if IF_PB: TOOLS.progress_bar((i+j*NUM_MAX_I)/float(NUM_TOTAL_OBJ), STR_DES="Exporting")
NCDF4_DATA.close()
def import_reference_map(self, STR_DIR, STR_FILENAME, ARR_X_RANGE=[], ARR_Y_RANGE=[], STR_TYPE="netCDF4", IF_PB=False):
self.ARR_REFERENCE_MAP = []
self.NUM_MAX_INDEX_RS = 0
self.NUM_MIN_INDEX_RS = 999
        if STR_TYPE == "netCDF4":
            NCDF4_DATA = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, STR_FILENAME), 'r', format="NETCDF4")
            # READ DIMENSIONs:
            self.REFERENCE_MAP_NY = NCDF4_DATA.dimensions["Y"].size
            self.REFERENCE_MAP_NX = NCDF4_DATA.dimensions["X"].size
            # Resolve the index ranges only after the dimensions are known,
            # since the defaults depend on them.
            if len(ARR_X_RANGE) != 0:
                self.I_MIN = ARR_X_RANGE[0]
                self.I_MAX = ARR_X_RANGE[1]
            else:
                self.I_MIN = 0
                self.I_MAX = self.REFERENCE_MAP_NX
            if len(ARR_Y_RANGE) != 0:
                self.J_MIN = ARR_Y_RANGE[0]
                self.J_MAX = ARR_Y_RANGE[1]
            else:
                self.J_MIN = 0
                self.J_MAX = self.REFERENCE_MAP_NY
# CREATE_VARIABLES:
INDEX = NCDF4_DATA.variables["INDEX" ]
INDEX_J = NCDF4_DATA.variables["INDEX_J" ]
INDEX_I = NCDF4_DATA.variables["INDEX_I" ]
CENTER_LON = NCDF4_DATA.variables["CENTER_LON" ]
CENTER_LAT = NCDF4_DATA.variables["CENTER_LAT" ]
INDEX_REF = NCDF4_DATA.variables["INDEX_REF" ]
INDEX_REF_J = NCDF4_DATA.variables["INDEX_REF_J" ]
INDEX_REF_I = NCDF4_DATA.variables["INDEX_REF_I" ]
CENTER_REF_LON = NCDF4_DATA.variables["CENTER_REF_LON" ]
CENTER_REF_LAT = NCDF4_DATA.variables["CENTER_REF_LAT" ]
for j in range(self.J_MIN, self.J_MAX):
for i in range(self.I_MIN, self.I_MAX):
OBJ_ELEMENT = {"INDEX" : 0 ,\
"INDEX_I" : 0 ,\
"INDEX_J" : 0 ,\
"CENTER" : {"LAT": 0.0, "LON": 0.0} ,\
"INDEX_REF" : 0 ,\
"INDEX_REF_I" : 0 ,\
"INDEX_REF_J" : 0 ,\
"CENTER_REF" : {"LAT": 0.0, "LON": 0.0} }
if INDEX [j][i] != None:
OBJ_ELEMENT["INDEX"] = INDEX [j][i]
OBJ_ELEMENT["INDEX_J"] = INDEX_J [j][i]
OBJ_ELEMENT["INDEX_I"] = INDEX_I [j][i]
OBJ_ELEMENT["INDEX_REF"] = INDEX_REF [j][i]
OBJ_ELEMENT["INDEX_REF_J"] = INDEX_REF_J [j][i]
OBJ_ELEMENT["INDEX_REF_I"] = INDEX_REF_I [j][i]
OBJ_ELEMENT["CENTER"]["LAT"] = CENTER_LAT [j][i]
OBJ_ELEMENT["CENTER"]["LON"] = CENTER_LON [j][i]
OBJ_ELEMENT["CENTER_REF"]["LAT"] = CENTER_REF_LAT[j][i]
OBJ_ELEMENT["CENTER_REF"]["LON"] = CENTER_REF_LON[j][i]
else:
OBJ_ELEMENT["INDEX"] = INDEX [j][i]
OBJ_ELEMENT["INDEX_I"] = INDEX_J [j][i]
OBJ_ELEMENT["INDEX_J"] = INDEX_I [j][i]
OBJ_ELEMENT["INDEX_REF"] = -999
OBJ_ELEMENT["INDEX_REF_J"] = -999
OBJ_ELEMENT["INDEX_REF_I"] = -999
OBJ_ELEMENT["CENTER"]["LAT"] = CENTER_LAT [j][i]
OBJ_ELEMENT["CENTER"]["LON"] = CENTER_LON [j][i]
OBJ_ELEMENT["CENTER_REF"]["LAT"] = -999
OBJ_ELEMENT["CENTER_REF"]["LON"] = -999
self.ARR_REFERENCE_MAP.append(OBJ_ELEMENT)
self.NUM_MIN_INDEX_RS = min(self.NUM_MIN_INDEX_RS, INDEX_REF[j][i])
self.NUM_MAX_INDEX_RS = max(self.NUM_MAX_INDEX_RS, INDEX_REF[j][i])
if IF_PB: TOOLS.progress_bar((j - self.J_MIN + 1)/float(self.J_MAX - self.J_MIN), STR_DES="IMPORTING")
if self.NUM_MIN_INDEX_RS == 0:
self.NUM_MAX_RS = self.NUM_MAX_INDEX_RS + 1
NCDF4_DATA.close()
def create_resample_map(self, ARR_REFERENCE_MAP=[], ARR_VARIABLES=["Value"], ARR_GRID_IN=[],\
IF_PB=False, NUM_NT=0, NUM_NX=0, NUM_NY=0, NUM_NULL=-9999.999):
if NUM_NT == 0:
NUM_NT = self.NUM_NT
if NUM_NX == 0:
NUM_NX = self.NUM_NX
if NUM_NY == 0:
NUM_NY = self.NUM_NY
if len(ARR_REFERENCE_MAP) == 0:
self.ARR_RESAMPLE_OUT = []
self.ARR_RESAMPLE_OUT_PARA = {"EDGE": {"N": 0.0,"S": 0.0,"E": 0.0,"W": 0.0}}
NUM_END_J = self.NUM_GRIDS_LAT - 1
NUM_END_I = self.NUM_GRIDS_LON - 1
ARR_EMPTY = [float("NaN") for n in range(self.NUM_NT)]
for J in range(self.NUM_GRIDS_LAT):
for I in range(self.NUM_GRIDS_LON):
NUM_IND = I + J * self.NUM_GRIDS_LON
self.add_an_geo_element(self.ARR_RESAMPLE_OUT, NUM_INDEX=NUM_IND, NUM_J=J, NUM_I=I, \
NUM_NX= self.NUM_GRIDS_LON, NUM_NY= self.NUM_GRIDS_LAT,\
ARR_VALUE_STR=ARR_VARIABLES, NUM_NT=NUM_NT)
self.ARR_RESAMPLE_MAP_PARA["EDGE"]["N"] = max( self.ARR_LAT[NUM_END_J][0], self.ARR_LAT[NUM_END_J][NUM_END_I] )
self.ARR_RESAMPLE_MAP_PARA["EDGE"]["S"] = min( self.ARR_LAT[0][0], self.ARR_LAT[0][NUM_END_I] )
self.ARR_RESAMPLE_MAP_PARA["EDGE"]["W"] = min( self.ARR_LAT[0][0], self.ARR_LAT[NUM_END_J][0] )
self.ARR_RESAMPLE_MAP_PARA["EDGE"]["E"] = max( self.ARR_LAT[0][NUM_END_I], self.ARR_LAT[NUM_END_J][NUM_END_I] )
self.NUM_MAX_INDEX_RS = NUM_IND
else:
if ARR_GRID_IN == []: ARR_GRID_IN = self.ARR_GRID
self.ARR_RESAMPLE_OUT = [ {} for n in range(NUM_NX * NUM_NY)]
for IND in range(len(self.ARR_RESAMPLE_OUT)):
for VAR in ARR_VARIABLES:
self.ARR_RESAMPLE_OUT[IND][VAR] = [{"VALUE" : []} for T in range(NUM_NT) ]
#for IND in range(len(ARR_REFERENCE_MAP)):
for IND in range(len(ARR_GRID_IN)):
R_IND = ARR_REFERENCE_MAP[IND]["INDEX_REF"]
R_J = ARR_REFERENCE_MAP[IND]["INDEX_REF_J"]
R_I = ARR_REFERENCE_MAP[IND]["INDEX_REF_I"]
                R_IND_FIX = TOOLS.fix_ind(R_IND, R_J, R_I, ARR_XRANGE=self.ARR_RESAMPLE_X_LIM, ARR_YRANGE=self.ARR_RESAMPLE_Y_LIM, NX=NUM_NX, NY=NUM_NY)
if R_IND != None:
for VAR in ARR_VARIABLES:
for T in range(NUM_NT):
#print("R_IND:{0:d}, T:{1:d}, IND:{2:d} ".format(R_IND, T, IND))
NUM_VAL_IN = ARR_GRID_IN[IND][VAR][T]["VALUE"]
self.ARR_RESAMPLE_OUT[R_IND][VAR][T]["VALUE"].append(NUM_VAL_IN)
self.ARR_RESAMPLE_OUT[R_IND]["INDEX"] = ARR_REFERENCE_MAP[IND]["INDEX_REF"]
self.ARR_RESAMPLE_OUT[R_IND]["INDEX_J"] = ARR_REFERENCE_MAP[IND]["INDEX_REF_J"]
self.ARR_RESAMPLE_OUT[R_IND]["INDEX_I"] = ARR_REFERENCE_MAP[IND]["INDEX_REF_I"]
self.ARR_RESAMPLE_OUT[R_IND]["CENTER"] = {"LAT": 0.0, "LON": 0.0 }
self.ARR_RESAMPLE_OUT[R_IND]["CENTER"]["LAT"] = ARR_REFERENCE_MAP[IND]["CENTER"]["LAT"]
self.ARR_RESAMPLE_OUT[R_IND]["CENTER"]["LON"] = ARR_REFERENCE_MAP[IND]["CENTER"]["LON"]
if IF_PB: TOOLS.progress_bar(TOOLS.cal_loop_progress([IND], [len(ARR_GRID_IN)]), STR_DES="RESAMPLING PROGRESS")
def cal_resample_map(self, ARR_VARIABLES, ARR_GRID_IN=[], NUM_NT=0, IF_PB=False, \
DIC_PERCENTILE={ "P05": 0.05, "P10": 0.1, "P25": 0.25, "P75": 0.75, "P90": 0.90, "P95": 0.95}, NUM_NULL=-9999.999):
if NUM_NT == 0:
NUM_NT = self.NUM_NT
NUM_RS_OUT_LEN = len(self.ARR_RESAMPLE_OUT)
for IND in range(NUM_RS_OUT_LEN):
for VAR in ARR_VARIABLES:
for T in range(NUM_NT):
ARR_IN = self.ARR_RESAMPLE_OUT[IND][VAR][T]["VALUE"]
if len(ARR_IN) > 0:
ARR_IN.sort()
NUM_ARR_LEN = len(ARR_IN)
NUM_ARR_MEAN = sum(ARR_IN) / float(NUM_ARR_LEN)
NUM_ARR_S2SUM = 0
if math.fmod(NUM_ARR_LEN,2) == 1:
NUM_MPOS = [int((NUM_ARR_LEN-1)/2.0), int((NUM_ARR_LEN-1)/2.0)]
else:
NUM_MPOS = [int(NUM_ARR_LEN/2.0) , int(NUM_ARR_LEN/2.0 -1) ]
self.ARR_RESAMPLE_OUT[IND][VAR][T]["MIN"] = min(ARR_IN)
self.ARR_RESAMPLE_OUT[IND][VAR][T]["MAX"] = max(ARR_IN)
self.ARR_RESAMPLE_OUT[IND][VAR][T]["MEAN"] = NUM_ARR_MEAN
self.ARR_RESAMPLE_OUT[IND][VAR][T]["MEDIAN"] = ARR_IN[NUM_MPOS[0]] *0.5 + ARR_IN[NUM_MPOS[1]] *0.5
for STVA in DIC_PERCENTILE:
self.ARR_RESAMPLE_OUT[IND][VAR][T][STVA] = ARR_IN[ round(NUM_ARR_LEN * DIC_PERCENTILE[STVA])-1]
for VAL in ARR_IN:
NUM_ARR_S2SUM += (VAL - NUM_ARR_MEAN)**2
self.ARR_RESAMPLE_OUT[IND][VAR][T]["STD"] = (NUM_ARR_S2SUM / max(1, NUM_ARR_LEN-1))**0.5
if IF_PB: TOOLS.progress_bar(TOOLS.cal_loop_progress([IND], [NUM_RS_OUT_LEN]), STR_DES="RESAMPLING CALCULATION")
def convert_grid2map(self, ARR_GRID_IN, STR_VAR, STR_VAR_TYPE="", NX=0, NY=0, NT=0, IF_PB=False, NC_TYPE=""):
if NC_TYPE == "INT":
            if NT == 0:
                ARR_OUT = NP.empty([NY, NX], dtype=NP.int32)
            else:
                ARR_OUT = NP.empty([NT, NY, NX], dtype=NP.int32)
elif NC_TYPE == "FLOAT":
if NT == 0:
ARR_OUT = NP.empty([NY, NX], dtype=NP.float64)
else:
ARR_OUT = NP.empty([NT, NY, NX], dtype=NP.float64)
else:
if NT == 0:
ARR_OUT = [[ self.NUM_NULL for i in range(NX)] for j in range(NY) ]
else:
ARR_OUT = [[[ self.NUM_NULL for i in range(NX)] for j in range(NY) ] for t in range(NT)]
if STR_VAR_TYPE == "":
for I, GRID in enumerate(ARR_GRID_IN):
if GRID["INDEX"] != -999:
if NT == 0:
#print(GRID["INDEX_J"], GRID["INDEX_I"], GRID[STR_VAR])
ARR_OUT[ GRID["INDEX_J"] ][ GRID["INDEX_I"] ] = GRID[STR_VAR]
else:
for T in range(NT):
ARR_OUT[T][ GRID["INDEX_J"] ][ GRID["INDEX_I"] ] = GRID[STR_VAR][T]
if IF_PB==True: TOOLS.progress_bar(((I+1)/(len(ARR_GRID_IN))))
else:
for I, GRID in enumerate(ARR_GRID_IN):
if GRID["INDEX"] != -999:
if NT == 0:
ARR_OUT[ GRID["INDEX_J"] ][ GRID["INDEX_I"] ] = GRID[STR_VAR][STR_VAR_TYPE]
else:
for T in range(NT):
ARR_OUT[T][ GRID["INDEX_J"] ][ GRID["INDEX_I"] ] = GRID[STR_VAR][T][STR_VAR_TYPE]
if IF_PB==True: TOOLS.progress_bar(((I+1)/(len(ARR_GRID_IN))))
return ARR_OUT
    def mask_grid(self, ARR_GRID_IN, STR_VAR, STR_VAR_TYPE, NUM_NT=0, STR_MASK="MASK",\
                  ARR_NUM_DTM=[0,1,2], ARR_NUM_DTM_RANGE=[0,1]):
        if NUM_NT == 0:
            NUM_NT= self.NUM_NT
        OBJ_GEO = GEO_TOOLS()  # mask_dtm is an instance method of GEO_TOOLS
        for IND, GRID in enumerate(ARR_GRID_IN):
            for T in range(NUM_NT):
                NUM_DTM = OBJ_GEO.mask_dtm(GRID[STR_VAR][T][STR_VAR_TYPE], ARR_DTM=ARR_NUM_DTM, ARR_DTM_RANGE=ARR_NUM_DTM_RANGE)
                ARR_GRID_IN[IND][STR_VAR][T][STR_MASK] = NUM_DTM
class MATH_TOOLS:
""" Some math tools that help us to calculate.
gau_kde: kernel density estimator by Gaussian Function
standard_dev: The Standard deviation
"""
def GaussJordanEli(arr_in):
num_ydim = len(arr_in)
num_xdim = len(arr_in[0])
arr_out = arr_in
if num_ydim -num_xdim == 0 or num_xdim - num_ydim == 1:
arr_i = NP.array([[0.0 for j in range(num_ydim)] for i in range(num_ydim)])
for ny in range(num_ydim):
arr_i[ny][ny] = 1.0
#print(arr_i)
for nx in range(num_xdim):
for ny in range(nx+1, num_ydim):
arr_i [ny] = arr_i [ny] - arr_i [nx] * arr_out[ny][nx] / float(arr_out[nx][nx])
arr_out[ny] = arr_out[ny] - arr_out[nx] * arr_out[ny][nx] / float(arr_out[nx][nx])
        if num_xdim - num_ydim == 1:
            for nx in range(num_xdim-1,-1,-1):
                for ny in range(num_ydim-1,nx, -1):
                    arr_i  [nx] = arr_i  [nx] - arr_i  [ny] * arr_out[nx][ny] / float(arr_out[ny][ny])
                    arr_out[nx] = arr_out[nx] - arr_out[ny] * arr_out[nx][ny] / float(arr_out[ny][ny])
        else:
            for nx in range(num_xdim-1,-1,-1):
                for ny in range(num_ydim-1, nx, -1):
                    arr_i  [nx] = arr_i  [nx] - arr_i  [ny] * arr_out[nx][ny] / float(arr_out[ny][ny])
                    arr_out[nx] = arr_out[nx] - arr_out[ny] * arr_out[nx][ny] / float(arr_out[ny][ny])
if num_xdim - num_ydim == 1:
arr_sol = [0.0 for n in range(num_ydim)]
for ny in range(num_ydim):
arr_sol[ny] = arr_out[ny][num_xdim-1]/arr_out[ny][ny]
return arr_out, arr_i, arr_sol
else:
return arr_out, arr_i
else:
print("Y dim: {0:d}, X dim: {1:d}: can not apply Gaussian-Jordan".format(num_ydim, num_xdim))
return [0]
def finding_XM_LSM(arr_in1, arr_in2, m=2):
        # Build the augmented normal-equation matrix for a degree-m polynomial
        # least-squares fit
arr_out=[[0.0 for i in range(m+2)] for j in range(m+1)]
arr_x_power_m = [0.0 for i in range(m+m+1)]
arr_xy_power_m = [0.0 for i in range(m+1)]
for n in range(len(arr_x_power_m)):
for x in range(len(arr_in1)):
arr_x_power_m[n] += arr_in1[x] ** n
for n in range(len(arr_xy_power_m)):
for x in range(len(arr_in1)):
arr_xy_power_m[n] += arr_in1[x] ** n * arr_in2[x]
for j in range(m+1):
for i in range(j,j+m+1):
arr_out[j][i-j] = arr_x_power_m[i]
arr_out[j][m+1] = arr_xy_power_m[j]
return arr_out
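    # Sketch of a quadratic least-squares fit combining the two helpers above.
    # GaussJordanEli expects array-like rows (e.g. an NP.array) and, for the
    # augmented (m+1) x (m+2) system, returns the solution as its third value:
    #   xs  = [0.0, 1.0, 2.0, 3.0]
    #   ys  = [1.0, 2.0, 5.0, 10.0]                  # y = x**2 + 1
    #   aug = NP.array(MATH_TOOLS.finding_XM_LSM(xs, ys, m=2))
    #   _, _, coeff = MATH_TOOLS.GaussJordanEli(aug)
    #   # coeff ~ [1.0, 0.0, 1.0]  (a0, a1, a2)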
def cal_modelperform (arr_obs , arr_sim , num_empty=-999.999):
# Based on Vazquez et al. 2002 (Hydrol. Process.)
num_arr = len(arr_obs)
num_n_total = num_arr
num_sum = 0
num_obs_sum = 0
for n in range( num_arr ):
if math.isnan(arr_obs[n]) or arr_obs[n] == num_empty:
num_n_total += -1
else:
num_sum = num_sum + ( arr_sim[n] - arr_obs[n] ) ** 2
num_obs_sum = num_obs_sum + arr_obs[n]
if num_n_total == 0 or num_obs_sum == 0:
RRMSE = -999.999
RMSE = -999.999
obs_avg = -999.999
else:
RRMSE = ( num_sum / num_n_total ) ** 0.5 * ( num_n_total / num_obs_sum )
RMSE = ( num_sum / num_n_total ) ** 0.5
obs_avg = num_obs_sum / num_n_total
num_n_total = num_arr
oo_sum = 0
po_sum = 0
for nn in range( num_arr ):
if math.isnan(arr_obs[nn]) or arr_obs[nn] == num_empty:
num_n_total = num_n_total - 1
else:
oo_sum = oo_sum + ( arr_obs[nn] - obs_avg ) ** 2
po_sum = po_sum + ( arr_sim[nn] - arr_obs[nn] ) ** 2
if num_n_total == 0 or oo_sum * po_sum == 0:
EF = -999.999
CD = -999.999
else:
EF = ( oo_sum - po_sum ) / oo_sum
CD = oo_sum / po_sum
return RRMSE,EF,CD,RMSE, num_arr
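    # The statistics above in formula form (O = obs, P = sim, n = valid pairs,
    # Ob = mean of the valid observations):
    #   RMSE  = sqrt( sum((P-O)**2) / n )
    #   RRMSE = RMSE / Ob
    #   EF    = 1 - sum((P-O)**2) / sum((O-Ob)**2)   (model efficiency)
    #   CD    = sum((O-Ob)**2) / sum((P-O)**2)       (coefficient of determination)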
def cal_kappa(ARR_IN, NUM_n=0, NUM_N=0, NUM_k=0):
""" Fleiss' kappa
Mustt input with ARR_IN in the following format:
ARR_IN = [ [ NUM for k in range(catalogue)] for N in range(Subjects)]
Additional parameters: NUM_n is the number of raters (e.g. sim and obs results)
Additional parameters: NUM_N is the number of subjects (e.g the outputs
Additional parameters: NUM_k is the number of catalogue (e.g. results )
"""
if NUM_N == 0:
NUM_N = len(ARR_IN)
if NUM_n == 0:
NUM_n = sum(ARR_IN[0])
if NUM_k == 0:
NUM_k = len(ARR_IN[0])
ARR_p_out = [ 0 for n in range(NUM_k)]
ARR_P_OUT = [ 0 for n in range(NUM_N)]
for N in range(NUM_N):
for k in range(NUM_k):
ARR_p_out[k] += ARR_IN[N][k]
ARR_P_OUT[N] += ARR_IN[N][k] ** 2
ARR_P_OUT[N] -= NUM_n
ARR_P_OUT[N] = ARR_P_OUT[N] * (1./(NUM_n *(NUM_n - 1)))
for k in range(NUM_k):
ARR_p_out[k] = ARR_p_out[k] / (NUM_N * NUM_n)
NUM_P_BAR = 0
for N in range(NUM_N):
NUM_P_BAR += ARR_P_OUT[N]
NUM_P_BAR = NUM_P_BAR / float(NUM_N)
NUM_p_bar = 0
for k in ARR_p_out:
NUM_p_bar += k **2
return (NUM_P_BAR - NUM_p_bar) / (1 - NUM_p_bar)
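    # Worked example (2 raters, 3 subjects, 2 categories): each row counts the
    # ratings per category for one subject:
    #   MATH_TOOLS.cal_kappa([[2, 0], [1, 1], [0, 2]])   # -> 1/3 ~ 0.333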
def gau_kde(ARR_IN_X, ARR_IN_I, NUM_BW=0.1 ):
NUM_SUM = 0.
NUM_LENG = len(ARR_IN_X)
ARR_OUT = [ 0. for n in range(NUM_LENG)]
for IND_J, J in enumerate(ARR_IN_X):
NUM_SUM = 0.0
for I in ARR_IN_I:
NUM_SUM += 1 / (2 * math.pi)**0.5 * math.e ** (-0.5 * ((J-I)/NUM_BW) ** 2 )
ARR_OUT[IND_J] = NUM_SUM / len(ARR_IN_I) / NUM_BW
return ARR_OUT
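    # Usage sketch (hypothetical sample): evaluate the density on a regular grid,
    # using the rule-of-thumb bandwidth from h_esti below:
    #   sample = [0.0, 0.2, -0.1, 0.4]
    #   grid   = [0.05 * n for n in range(-20, 21)]
    #   dens   = MATH_TOOLS.gau_kde(grid, sample, NUM_BW=MATH_TOOLS.h_esti(sample))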
def standard_dev(ARR_IN):
        ARR_CLEAN = [N for N in ARR_IN if not math.isnan(N)]
        NUM_N = len(ARR_CLEAN)
        NUM_MEAN = 1.0*sum(ARR_CLEAN)/NUM_N
        NUM_SUM2 = 0.0
        for N in ARR_CLEAN:
            NUM_SUM2 += (N-NUM_MEAN)**2
        return (NUM_SUM2 / (NUM_N-1)) ** 0.5
def h_esti(ARR_IN):
#A rule-of-thumb bandwidth estimator
NUM_SIGMA = standard_dev(ARR_IN)
NUM_N = len(ARR_IN)
return ((4 * NUM_SIGMA ** 5) / (3*NUM_N) ) ** 0.2
def data2array(ARR_IN, STR_IN="MEAN"):
NUM_J = len(ARR_IN)
NUM_I = len(ARR_IN[0])
ARR_OUT = [[ 0.0 for i in range(NUM_I)] for j in range(NUM_J) ]
for j in range(NUM_J):
for i in range(NUM_I):
ARR_OUT[j][i] = ARR_IN[j][i][STR_IN]
return ARR_OUT
def reshape2d(ARR_IN):
ARR_OUT=[]
for A in ARR_IN:
for B in A:
ARR_OUT.append(B)
return ARR_OUT
def NormalVector( V1, V2):
return [(V1[1]*V2[2] - V1[2]*V2[1]), (V1[2]*V2[0] - V1[0]*V2[2]),(V1[0]*V2[1] - V1[1]*V2[0])]
def NVtoPlane( P0, P1, P2):
"""Input of P should be 3-dimensionals"""
V1 = [(P1[0]-P0[0]),(P1[1]-P0[1]),(P1[2]-P0[2])]
V2 = [(P2[0]-P0[0]),(P2[1]-P0[1]),(P2[2]-P0[2])]
ARR_NV = MATH_TOOLS.NormalVector(V1, V2)
D = ARR_NV[0] * P0[0] + ARR_NV[1] * P0[1] + ARR_NV[2] * P0[2]
return ARR_NV[0],ARR_NV[1],ARR_NV[2],D
def FindZatP3( P0, P1, P2, P3):
""" input of P: (X,Y,Z); but P3 is (X,Y) only """
A,B,C,D = MATH_TOOLS.NVtoPlane(P0, P1, P2)
return (D-A*P3[0] - B*P3[1])/float(C)
class TOOLS:
""" TOOLS is contains:
timestamp
fix_ind
progress_bar
cal_progrss
"""
ARR_HOY = [0, 744, 1416, 2160, 2880, 3624, 4344, 5088, 5832, 6552, 7296, 8016, 8760]
ARR_HOY_LEAP = [0, 744, 1440, 2184, 2904, 3648, 4368, 5112, 5856, 6576, 7320, 8040, 8784]
def NNARR(ARR_IN, IF_PAIRING=False):
"Clean the NaN value in the array"
if IF_PAIRING:
ARR_SIZE = len(ARR_IN)
ARR_OUT = [ [] for N in range(ARR_SIZE)]
for ind_n, N in enumerate(ARR_IN[0]):
IF_NAN = False
for ind_a in range(ARR_SIZE):
if math.isnan(ARR_IN[ind_a][ind_n]):
IF_NAN = True
break
if not IF_NAN:
for ind_a in range(ARR_SIZE):
ARR_OUT[ind_a].append(ARR_IN[ind_a][ind_n])
else:
ARR_OUT = [ ]
for N in ARR_IN:
if not math.isnan(N):
ARR_OUT.append(N)
return ARR_OUT
    def DATETIME2HOY(ARR_TIME, ARR_HOY_IN=[]):
        if len(ARR_HOY_IN) == 0:
            if math.fmod(ARR_TIME[0], 4) == 0:
                ARR_HOY_IN = TOOLS.ARR_HOY_LEAP
            else:
                ARR_HOY_IN = TOOLS.ARR_HOY
return ARR_HOY_IN[ARR_TIME[1]-1] + (ARR_TIME[2]-1)*24 + ARR_TIME[3]
def timestamp(STR_IN=""):
print("{0:04d}-{1:02d}-{2:02d}_{3:02d}:{4:02d}:{5:02d} {6:s}".format(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday,\
time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec, STR_IN) )
def fix_ind(IND_IN, IND_J, IND_I, ARR_XRANGE=[], ARR_YRANGE=[], NX=0, NY=0):
NUM_DY = ARR_YRANGE[0]
NUM_NX_F = ARR_XRANGE[0]
NUM_NX_R = NX - (ARR_XRANGE[1]+1)
if IND_J == ARR_YRANGE[0]:
IND_OUT = IND_IN - NUM_DY * NX - NUM_NX_F
else:
IND_OUT = IND_IN - NUM_DY * NX - NUM_NX_F * (IND_J - NUM_DY +1) - NUM_NX_R * (IND_J - NUM_DY)
return IND_OUT
def progress_bar(NUM_PROGRESS, NUM_PROGRESS_BIN=0.05, STR_SYS_SYMBOL="=", STR_DES="Progress"):
NUM_SYM = int(NUM_PROGRESS / NUM_PROGRESS_BIN)
sys.stdout.write('\r')
sys.stdout.write('[{0:20s}] {1:4.2f}% {2:s}'.format(STR_SYS_SYMBOL*NUM_SYM, NUM_PROGRESS*100, STR_DES))
sys.stdout.flush()
def clean_arr(ARR_IN, CRITERIA=1):
ARR_OUT=[]
for i,n in enumerate(ARR_IN):
if len(n)> CRITERIA:
ARR_OUT.append(n)
return ARR_OUT
def cal_loop_progress(ARR_INDEX, ARR_INDEX_MAX, NUM_CUM_MAX=1, NUM_CUM_IND=1, NUM_TOTAL_MAX=1):
""" Please list from smallest to largest, i.e.: x->y->z """
if len(ARR_INDEX) == len(ARR_INDEX_MAX):
for i, i_index in enumerate(ARR_INDEX):
NUM_IND_PER = (i_index+1)/float(ARR_INDEX_MAX[i])
NUM_TOTAL_MAX = NUM_TOTAL_MAX * ARR_INDEX_MAX[i]
if i >0: NUM_CUM_MAX = NUM_CUM_MAX * ARR_INDEX_MAX[i-1]
NUM_CUM_IND = NUM_CUM_IND + NUM_CUM_MAX * i_index
return NUM_CUM_IND / float(NUM_TOTAL_MAX)
else:
print("Wrong dimenstion for in put ARR_INDEX ({0:d}) and ARR_INDEX_MAX ({1:d})".format(len(ARR_INDEX), len(ARR_INDEX_MAX)))
def calendar_cal(ARR_START_TIME, ARR_INTERVAL, ARR_END_TIME_IN=[0, 0, 0, 0, 0, 0.0], IF_LEAP=False):
ARR_END_TIME = [ 0,0,0,0,0,0.0]
ARR_DATETIME = ["SECOND", "MINUTE", "HOUR","DAY", "MON", "YEAR"]
NUM_ARR_DATETIME = len(ARR_DATETIME)
IF_FERTIG = False
ARR_FERTIG = [0,0,0,0,0,0]
DIC_TIME_LIM = \
{"YEAR" : {"START": 0 , "LIMIT": 9999 },\
"MON" : {"START": 1 , "LIMIT": 12 },\
"DAY" : {"START": 1 , "LIMIT": 31 },\
"HOUR" : {"START": 0 , "LIMIT": 23 },\
"MINUTE": {"START": 0 , "LIMIT": 59 },\
"SECOND": {"START": 0 , "LIMIT": 59 },\
}
for I, T in enumerate(ARR_START_TIME):
ARR_END_TIME[I] = T + ARR_INTERVAL[I]
while IF_FERTIG == False:
            if math.fmod(ARR_END_TIME[0],4) == 0: IF_LEAP=True  # NOTE: simplified leap-year rule (century exceptions are ignored)
if IF_LEAP:
ARR_DAY_LIM = [0,31,29,31,30,31,30,31,31,30,31,30,31]
else:
ARR_DAY_LIM = [0,31,28,31,30,31,30,31,31,30,31,30,31]
for I, ITEM in enumerate(ARR_DATETIME):
NUM_ARR_POS = NUM_ARR_DATETIME-I-1
if ITEM == "DAY":
if ARR_END_TIME[NUM_ARR_POS] > ARR_DAY_LIM[ARR_END_TIME[1]]:
ARR_END_TIME[NUM_ARR_POS] = ARR_END_TIME[NUM_ARR_POS] - ARR_DAY_LIM[ARR_END_TIME[1]]
ARR_END_TIME[NUM_ARR_POS - 1] += 1
else:
if ARR_END_TIME[NUM_ARR_POS] > DIC_TIME_LIM[ITEM]["LIMIT"]:
ARR_END_TIME[NUM_ARR_POS - 1] += 1
ARR_END_TIME[NUM_ARR_POS] = ARR_END_TIME[NUM_ARR_POS] - DIC_TIME_LIM[ITEM]["LIMIT"] - 1
for I, ITEM in enumerate(ARR_DATETIME):
NUM_ARR_POS = NUM_ARR_DATETIME-I-1
if ITEM == "DAY":
if ARR_END_TIME[NUM_ARR_POS] <= ARR_DAY_LIM[ARR_END_TIME[1]]: ARR_FERTIG[NUM_ARR_POS] = 1
else:
if ARR_END_TIME[NUM_ARR_POS] <= DIC_TIME_LIM[ITEM]["LIMIT"]: ARR_FERTIG[NUM_ARR_POS] = 1
if sum(ARR_FERTIG) == 6: IF_FERTIG = True
return ARR_END_TIME
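    # Example (checked against the normalisation above):
    #   TOOLS.calendar_cal([2020, 1, 31, 23, 0, 0.0], [0, 0, 1, 2, 0, 0.0])
    #   # -> [2020, 2, 2, 1, 0, 0.0]   (31 Jan 2020 23:00 plus 1 day and 2 hours)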
class MPI_TOOLS:
def __init__(self, MPI_SIZE=1, MPI_RANK=0,\
NUM_NX_END=1, NUM_NY_END=1, NUM_NX_START=0, NUM_NY_START=0, NUM_NX_CORES=1 ,\
NUM_NX_TOTAL=1, NUM_NY_TOTAL=1 ):
""" END number follow the python philisophy: End number is not included in the list """
self.NUM_SIZE = MPI_SIZE
self.NUM_RANK = MPI_RANK
self.NUM_NX_START = NUM_NX_START
self.NUM_NY_START = NUM_NY_START
self.NUM_NX_SIZE = NUM_NX_END - NUM_NX_START
self.NUM_NY_SIZE = NUM_NY_END - NUM_NY_START
        self.NUM_NX_CORES = NUM_NX_CORES
        self.NUM_NY_CORES = max(1, int(self.NUM_SIZE / NUM_NX_CORES))
        self.NUM_NX_TOTAL = NUM_NX_TOTAL
        self.NUM_NY_TOTAL = NUM_NY_TOTAL
self.ARR_RANK_DESIGN = [ {} for n in range(self.NUM_SIZE)]
def CPU_GEOMETRY_2D(self):
NUM_NX_REMAIN = self.NUM_NX_SIZE % self.NUM_NX_CORES
NUM_NY_REMAIN = self.NUM_NY_SIZE % self.NUM_NY_CORES
NUM_NX_DIFF = int((self.NUM_NX_SIZE - NUM_NX_REMAIN) / self.NUM_NX_CORES )
NUM_NY_DIFF = int((self.NUM_NY_SIZE - NUM_NY_REMAIN) / self.NUM_NY_CORES )
NUM_NY_DIFF_P1 = NUM_NY_DIFF + 1
NUM_NX_DIFF_P1 = NUM_NX_DIFF + 1
IND_RANK = 0
ARR_RANK_DESIGN = [ 0 for n in range(self.NUM_SIZE)]
for ny in range(self.NUM_NY_CORES):
for nx in range(self.NUM_NX_CORES):
NUM_RANK = ny * self.NUM_NX_CORES + nx
DIC_IN = {"INDEX_IN": NUM_RANK, "NX_START": 0, "NY_START": 0, "NX_END": 0, "NY_END": 0 }
if ny < NUM_NY_REMAIN:
DIC_IN["NY_START"] = (ny + 0) * NUM_NY_DIFF_P1 + self.NUM_NY_START
DIC_IN["NY_END" ] = (ny + 1) * NUM_NY_DIFF_P1 + self.NUM_NY_START
else:
DIC_IN["NY_START"] = (ny - NUM_NY_REMAIN + 0) * NUM_NY_DIFF + NUM_NY_REMAIN * NUM_NY_DIFF_P1 + self.NUM_NY_START
DIC_IN["NY_END" ] = (ny - NUM_NY_REMAIN + 1) * NUM_NY_DIFF + NUM_NY_REMAIN * NUM_NY_DIFF_P1 + self.NUM_NY_START
if nx < NUM_NX_REMAIN:
DIC_IN["NX_START"] = (nx + 0) * NUM_NX_DIFF_P1 + self.NUM_NX_START
DIC_IN["NX_END" ] = (nx + 1) * NUM_NX_DIFF_P1 + self.NUM_NX_START
else:
DIC_IN["NX_START"] = (nx - NUM_NX_REMAIN + 0) * NUM_NX_DIFF + NUM_NX_REMAIN * NUM_NX_DIFF_P1 + self.NUM_NX_START
DIC_IN["NX_END" ] = (nx - NUM_NX_REMAIN + 1) * NUM_NX_DIFF + NUM_NX_REMAIN * NUM_NX_DIFF_P1 + self.NUM_NX_START
ARR_RANK_DESIGN[NUM_RANK] = DIC_IN
self.ARR_RANK_DESIGN = ARR_RANK_DESIGN
return ARR_RANK_DESIGN
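    # Example (hypothetical 5x5 grid on 4 ranks, 2 cores along x):
    #   MT = MPI_TOOLS(MPI_SIZE=4, NUM_NX_END=5, NUM_NY_END=5, NUM_NX_CORES=2)
    #   DESIGN = MT.CPU_GEOMETRY_2D()
    #   # DESIGN[0] covers x 0..3 and y 0..3 (end-exclusive),
    #   # DESIGN[3] covers x 3..5 and y 3..5; the leftover rows/columns from the
    #   # uneven split go to the lower-index ranks.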
    def CPU_MAP(self ):
        ARR_CPU_MAP = [ [ NP.nan for i in range(self.NUM_NX_TOTAL)] for j in range(self.NUM_NY_TOTAL) ]
        for RANK in range(len(self.ARR_RANK_DESIGN)):
            print("DEAL WITH {0:d} {1:d}".format(RANK, self.ARR_RANK_DESIGN[RANK]["INDEX_IN"] ))
            for jj in range(self.ARR_RANK_DESIGN[RANK]["NY_START"], self.ARR_RANK_DESIGN[RANK]["NY_END"]):
                for ii in range(self.ARR_RANK_DESIGN[RANK]["NX_START"], self.ARR_RANK_DESIGN[RANK]["NX_END"]):
                    ARR_CPU_MAP[jj][ii] = self.ARR_RANK_DESIGN[RANK]["INDEX_IN"]
        return ARR_CPU_MAP
def GATHER_ARR_2D(self, ARR_IN, ARR_IN_GATHER, ARR_RANK_DESIGN=[]):
if ARR_RANK_DESIGN == []:
ARR_RANK_DESIGN = self.ARR_RANK_DESIGN
for N in range(1, self.NUM_SIZE):
I_STA = ARR_RANK_DESIGN[N]["NX_START"]
I_END = ARR_RANK_DESIGN[N]["NX_END" ]
J_STA = ARR_RANK_DESIGN[N]["NY_START"]
J_END = ARR_RANK_DESIGN[N]["NY_END" ]
for J in range(J_STA, J_END ):
for I in range(I_STA, I_END ):
ARR_IN[J][I] = ARR_IN_GATHER[N][J][I]
return ARR_IN
def MPI_MESSAGE(self, STR_TEXT=""):
TIME_NOW = time.gmtime()
print("MPI RANK: {0:5d} @ {1:02d}:{2:02d}:{3:02d} # {4:s}"\
.format(self.NUM_RANK, TIME_NOW.tm_hour, TIME_NOW.tm_min, TIME_NOW.tm_sec, STR_TEXT ))
class GEO_TOOLS:
def __init__(self):
STR_NCDF4PY = NC.__version__
print("Using netCDF4 for Python, Version: {0:s}".format(STR_NCDF4PY))
def mask_dtm(self, NUM, ARR_DTM=[0,1,2], ARR_DTM_RANGE=[0,1], ARR_DTM_STR=["OUT","IN","OUT"]):
""" The determination algorithm is : x-1 < NUM <= x """
for i, n in enumerate(ARR_DTM):
if i == 0:
if NUM <= ARR_DTM_RANGE[i]: NUM_OUT = n
elif i == len(ARR_DTM_RANGE):
if NUM > ARR_DTM_RANGE[i-1]: NUM_OUT = n
else:
if NUM > ARR_DTM_RANGE[i-1] and NUM <= ARR_DTM_RANGE[i]: NUM_OUT = n
return NUM_OUT
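    # Worked example with the defaults ARR_DTM=[0, 1, 2] and ARR_DTM_RANGE=[0, 1]:
    #   GT = GEO_TOOLS()
    #   GT.mask_dtm(-0.5)   # -> 0   (NUM <= 0)
    #   GT.mask_dtm( 0.5)   # -> 1   (0 < NUM <= 1)
    #   GT.mask_dtm( 2.5)   # -> 2   (NUM > 1)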
    def mask_array(self, ARR_IN, ARR_MASK_OUT=[], ARR_DTM=[0,1,2], ARR_DTM_RANGE=[0,1], ARR_DTM_STR=["OUT","IN","OUT"], IF_2D=False):
        if IF_2D:
            NUM_NX = len(ARR_IN[0])
            NUM_NY = len(ARR_IN)
            ARR_OUT = [ [ float("NaN") for i in range(NUM_NX)] for j in range(NUM_NY) ]
            for J in range(NUM_NY):
                for I in range(NUM_NX):
                    ARR_OUT[J][I] = self.mask_dtm(ARR_IN[J][I], ARR_DTM=ARR_DTM, ARR_DTM_RANGE=ARR_DTM_RANGE, ARR_DTM_STR=ARR_DTM_STR)
        else:
            NUM_NX = len(ARR_IN)
            ARR_OUT = [0 for n in range(NUM_NX)]
            for N in range(NUM_NX):
                ARR_OUT[N] = self.mask_dtm(ARR_IN[N], ARR_DTM=ARR_DTM, ARR_DTM_RANGE=ARR_DTM_RANGE, ARR_DTM_STR=ARR_DTM_STR)
return ARR_OUT
def MAKE_LAT_LON_ARR(self, FILE_NC_IN, STR_LAT="lat", STR_LON="lon", source="CFC"):
""" Reading LAT and LON from a NC file """
NC_DATA_IN = NC.Dataset(FILE_NC_IN, "r", format="NETCDF4")
if source == "CFC":
arr_lat_in = NC_DATA_IN.variables[STR_LAT]
arr_lon_in = NC_DATA_IN.variables[STR_LON]
num_nlat = len(arr_lat_in)
num_nlon = len(arr_lon_in)
arr_lon_out = [[0.0 for i in range(num_nlon)] for j in range(num_nlat)]
arr_lat_out = [[0.0 for i in range(num_nlon)] for j in range(num_nlat)]
for j in range(num_nlat):
for i in range(num_nlon):
                    arr_lat_out[j][i] = arr_lat_in[j]
                    arr_lon_out[j][i] = arr_lon_in[i]
return arr_lat_out, arr_lon_out
class NETCDF4_HELPER:
def __init__(self):
STR_NCDF4PY = NC.__version__
print("Using netCDF4 for Python, Version: {0:s}".format(STR_NCDF4PY))
def create_wrf_ensemble(self, STR_FILE_IN, STR_FILE_OUT, ARR_VAR=[], STR_DIR="./", NUM_ENSEMBLE_SIZE=1 ):
FILE_OUT = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, STR_FILE_OUT), "w",format="NETCDF4")
FILE_IN = NC.Dataset("{1:s}/{1:s}".format(STR_DIR, STR_FILE_IN ), "r",format="NETCDF4")
# CREATE DIMENSIONS:
for DIM in FILE_IN.dimensions:
FILE_OUT.createDimension(DIM, FILE_IN.dimensions[DIM].size )
FILE_OUT.createDimension("Ensembles", NUM_ENSEMBLE_SIZE )
# CREATE ATTRIBUTES:
FILE_OUT.TITLE = FILE_IN.TITLE
FILE_OUT.START_DATE = FILE_IN.START_DATE
FILE_OUT.SIMULATION_START_DATE = FILE_IN.SIMULATION_START_DATE
FILE_OUT.DX = FILE_IN.DX
FILE_OUT.DY = FILE_IN.DY
FILE_OUT.SKEBS_ON = FILE_IN.SKEBS_ON
FILE_OUT.SPEC_BDY_FINAL_MU = FILE_IN.SPEC_BDY_FINAL_MU
FILE_OUT.USE_Q_DIABATIC = FILE_IN.USE_Q_DIABATIC
FILE_OUT.GRIDTYPE = FILE_IN.GRIDTYPE
FILE_OUT.DIFF_OPT = FILE_IN.DIFF_OPT
FILE_OUT.KM_OPT = FILE_IN.KM_OPT
if len(ARR_VAR) >0:
for V in ARR_VAR:
if V[1] == "2D":
FILE_OUT.createVariable(V[0], "f8", ("Ensembles", "Time", "south_north", "west_east" ))
elif V[1] == "3D":
FILE_OUT.createVariable(V[0], "f8", ("Ensembles", "Time", "bottom_top", "south_north", "west_east" ))
FILE_OUT.close()
FILE_IN.close()
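    # Usage sketch (hypothetical file names; each ARR_VAR entry is [name, rank]):
    #   NH = NETCDF4_HELPER()
    #   NH.create_wrf_ensemble("wrfout_d01", "wrfens_d01.nc",
    #                          ARR_VAR=[["T2", "2D"], ["T", "3D"]],
    #                          STR_DIR="./", NUM_ENSEMBLE_SIZE=10)
    #   NH.add_ensemble("wrfout_d01", "wrfens_d01.nc", "T2", STR_DIM="2D", IND_ENSEMBLE=0)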
def add_ensemble(self, FILE_IN, FILE_OUT, STR_VAR, STR_DIM="2D", STR_DIR="./", IND_ENSEMBLE=0):
FILE_OUT = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, FILE_OUT), "a",format="NETCDF4")
FILE_IN = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, FILE_IN ), "r",format="NETCDF4")
ARR_VAR_IN = FILE_IN.variables[STR_VAR]
NUM_NT = len(ARR_VAR_IN)
NUM_NK = FILE_IN.dimensions["bottom_top"].size
NUM_NJ = FILE_IN.dimensions["south_north"].size
NUM_NI = FILE_IN.dimensions["west_east"].size
for time in range(NUM_NT):
if STR_DIM == "2D":
FILE_OUT.variables[STR_VAR][IND_ENSEMBLE, time] = FILE_IN.variables[STR_VAR][time]
elif STR_DIM == "3D":
for k in range(NUM_NK):
FILE_OUT.variables[STR_VAR][IND_ENSEMBLE, time] = FILE_IN.variables[STR_VAR][time]
FILE_OUT.close()
FILE_IN.close()
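# Hedged usage sketch (the wrfout file names are assumptions): create a
# container for two ensemble members of T2, then copy each member into its slot.
# NCH = NETCDF4_HELPER()
# NCH.create_wrf_ensemble("wrfout_m0.nc", "ensemble.nc", ARR_VAR=[("T2", "2D")], NUM_ENSEMBLE_SIZE=2)
# NCH.add_ensemble("wrfout_m0.nc", "ensemble.nc", "T2", STR_DIM="2D", IND_ENSEMBLE=0)
# NCH.add_ensemble("wrfout_m1.nc", "ensemble.nc", "T2", STR_DIM="2D", IND_ENSEMBLE=1)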
class WRF_HELPER:
STR_DIR_ROOT = "./"
NUM_TIME_INIT = 0
NUM_SHIFT = 0.001
def __init__(self):
"""
        Remember: most arrays should follow the [j, i] index order instead of [x, y].
"""
STR_NCDF4PY = NC.__version__
print("Using netCDF4 for Python, Version: {0:s}".format(STR_NCDF4PY))
def GEO_INFORMATER(self, STR_FILE="geo_em.d01.nc", STR_DIR=""):
print("INPUT GEO FILE: {0:s}".format(STR_FILE))
if STR_DIR == "":
            STR_DIR = self.STR_DIR_ROOT
self.FILE_IN = NC.Dataset("{0:s}/{1:s}".format(STR_DIR, STR_FILE ), "r",format="NETCDF4")
self.MAP_LAT = self.FILE_IN.variables["CLAT"] [self.NUM_TIME_INIT]
self.MAP_LON = self.FILE_IN.variables["CLONG"][self.NUM_TIME_INIT]
ARR_TMP_IN = self.FILE_IN.variables["CLONG"][0]
# Since NetCDF4 for python does not support the hyphen in attributes, I
# am forced to calculate the NX and NY based on a map in the NC file.
self.NUM_NX = len(ARR_TMP_IN[0])
self.NUM_NY = len(ARR_TMP_IN)
self.NUM_DX = self.FILE_IN.DX
        self.NUM_DY = self.FILE_IN.DY
def GEO_HELPER(self, ARR_LL_SW, ARR_LL_NE):
self.MAP_CROP_MASK = [[ 0 for i in range(self.NUM_NX)] for j in range(self.NUM_NY)]
self.DIC_CROP_INFO = {"NE": {"LAT":0, "LON":0, "I":0, "J":0},\
"SW": {"LAT":0, "LON":0, "I":0, "J":0}}
ARR_TMP_I = []
ARR_TMP_J = []
for j in range(self.NUM_NY):
for i in range(self.NUM_NX):
NUM_CHK_SW_J = self.MAP_LAT[j][i] - ARR_LL_SW[0]
if NUM_CHK_SW_J == 0:
NUM_CHK_SW_J = self.MAP_LAT[j][i] - ARR_LL_SW[0] + self.NUM_SHIFT
NUM_CHK_SW_I = self.MAP_LON[j][i] - ARR_LL_SW[1]
if NUM_CHK_SW_I == 0:
                    NUM_CHK_SW_I = self.MAP_LON[j][i] - ARR_LL_SW[1] - self.NUM_SHIFT
NUM_CHK_NE_J = self.MAP_LAT[j][i] - ARR_LL_NE[0]
if NUM_CHK_NE_J == 0:
NUM_CHK_NE_J = self.MAP_LAT[j][i] - ARR_LL_NE[0] + self.NUM_SHIFT
NUM_CHK_NE_I = self.MAP_LON[j][i] - ARR_LL_NE[1]
if NUM_CHK_NE_I == 0:
NUM_CHK_NE_I = self.MAP_LON[j][i] - ARR_LL_NE[1] - self.NUM_SHIFT
NUM_CHK_NS_IN = NUM_CHK_SW_J * NUM_CHK_NE_J
NUM_CHK_WE_IN = NUM_CHK_SW_I * NUM_CHK_NE_I
if NUM_CHK_NS_IN < 0 and NUM_CHK_WE_IN < 0:
self.MAP_CROP_MASK[j][i] = 1
ARR_TMP_J.append(j)
ARR_TMP_I.append(i)
NUM_SW_J = min( ARR_TMP_J )
NUM_SW_I = min( ARR_TMP_I )
NUM_NE_J = max( ARR_TMP_J )
NUM_NE_I = max( ARR_TMP_I )
self.DIC_CROP_INFO["NE"]["J"] = NUM_NE_J
self.DIC_CROP_INFO["NE"]["I"] = NUM_NE_I
self.DIC_CROP_INFO["NE"]["LAT"] = self.MAP_LAT[NUM_NE_J][NUM_NE_I]
self.DIC_CROP_INFO["NE"]["LON"] = self.MAP_LON[NUM_NE_J][NUM_NE_I]
self.DIC_CROP_INFO["SW"]["J"] = NUM_SW_J
self.DIC_CROP_INFO["SW"]["I"] = NUM_SW_I
self.DIC_CROP_INFO["SW"]["LAT"] = self.MAP_LAT[NUM_SW_J][NUM_SW_I]
self.DIC_CROP_INFO["SW"]["LON"] = self.MAP_LON[NUM_SW_J][NUM_SW_I]
    def PROFILE_HELPER(self, STR_FILE_IN, ARR_DATE_START, NUM_DOMS=3, NUM_TIMESTEPS=24, IF_PB=False):
        """
        Reads a WRF log file and, given the array of the starting date, the
        number of domains and the number of timesteps, profiles the
        wall-clock time WRF spends per domain and per simulation hour.
        """
FILE_READ_IN = open("{0:s}".format(STR_FILE_IN))
ARR_READ_IN = FILE_READ_IN.readlines()
NUM_TIME = NUM_TIMESTEPS
NUM_DOMAIN = NUM_DOMS
NUM_DATE_START = ARR_DATE_START
NUM_LEN_IN = len(ARR_READ_IN)
ARR_TIME_PROFILE = [[0 for T in range(NUM_TIME)] for D in range(NUM_DOMS)]
for I, TEXT_IN in enumerate(ARR_READ_IN):
ARR_TEXT = re.split("\s",TEXT_IN.strip())
if ARR_TEXT[0] == "Timing":
if ARR_TEXT[2] == "main:" or ARR_TEXT[2] == "main":
for ind, T in enumerate(ARR_TEXT):
if T == "time" : ind_time_text = ind + 1
if T == "elapsed": ind_elapsed_text = ind - 1
if T == "domain" : ind_domain_text = ind + 3
arr_time_in = re.split("_", ARR_TEXT[ind_time_text])
arr_date = re.split("-", arr_time_in[0])
arr_time = re.split(":", arr_time_in[1])
num_domain = int(re.split(":", ARR_TEXT[ind_domain_text])[0])
num_elapsed = float(ARR_TEXT[ind_elapsed_text])
NUM_HOUR_FIX = (int(arr_date[2]) - NUM_DATE_START[2]) * 24
NUM_HOUR = NUM_HOUR_FIX + int(arr_time[0])
ARR_TIME_PROFILE[num_domain-1][NUM_HOUR] += num_elapsed
if IF_PB: TOOLS.progress_bar(I/float(NUM_LEN_IN))
#self.ARR_TIME_PROFILE = ARR_TIME_PROFILE
return ARR_TIME_PROFILE
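# Hedged usage sketch (the log file name and start date are assumptions):
# WRF = WRF_HELPER()
# ARR_PROFILE = WRF.PROFILE_HELPER("rsl.out.0000", [2016, 7, 1], NUM_DOMS=3, NUM_TIMESTEPS=24)
# ARR_PROFILE[d][h] then holds the seconds domain d+1 spent on simulation hour h.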
class DATA_READER:
"""
The DATA_READER is based on my old work: gridtrans.py.
"""
def __init__(self, STR_NULL="noData", NUM_NULL=-999.999):
self.STR_NULL=STR_NULL
self.NUM_NULL=NUM_NULL
    def stripblnk(self, arr, *num_typ):
new_arr=[]
for i in arr:
if i == "":
pass
else:
if num_typ[0] == 'int':
new_arr.append(int(i))
elif num_typ[0] == 'float':
new_arr.append(float(i))
elif num_typ[0] == '':
new_arr.append(i)
else:
print("WRONG num_typ!")
return new_arr
def tryopen(self, sourcefile, ag):
try:
opf=open(sourcefile,ag)
return opf
        except IOError:
print("No such file.")
return "error"
def READCSV(self, sourcefile):
opf = self.tryopen(sourcefile,'r')
opfchk = self.tryopen(sourcefile,'r')
print("reading source file {0:s}".format(sourcefile))
chk_lines = opfchk.readlines()
num_totallines = len(chk_lines)
ncols = 0
num_notnum = 0
for n in range(num_totallines):
line_in = chk_lines[n]
c_first = re.findall(".",line_in.strip())
if c_first[0] == "#":
num_notnum += 1
else:
ncols = len( re.split(",",line_in.strip()) )
break
if ncols == 0:
print("something wrong with the input file! (all comments?)")
else:
del opfchk
nrows=num_totallines - num_notnum
result_arr=[[self.NUM_NULL for j in range(ncols)] for i in range(nrows)]
result_arr_text=[]
num_pass = 0
for j in range(0,num_totallines):
# chk if comment
#print (j,i,chk_val)
line_in = opf.readline()
c_first = re.findall(".",line_in.strip())[0]
if c_first == "#":
result_arr_text.append(line_in)
num_pass += 1
else:
arr_in = re.split(",",line_in.strip())
for i in range(ncols):
chk_val = arr_in[i]
if chk_val == self.STR_NULL:
result_arr[j-num_pass][i] = self.NUM_NULL
else:
result_arr[j-num_pass][i] = float(chk_val)
return result_arr,result_arr_text
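# Hedged usage sketch (the CSV file name is an assumption): READCSV skips lines
# starting with '#' and maps "noData" cells to the numeric null -999.999.
# READER = DATA_READER()
# ARR_DATA, ARR_COMMENTS = READER.READCSV("input.csv")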
| metalpen1984/SciTool_Py | GRIDINFORMER.py | Python | lgpl-3.0 | 66,239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import html
import requests
import csv, codecs, cStringIO
import sys
class Person:
def __init__(self, party, name, email):
self.party = party
self.name = name
self.email = email
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
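# Usage sketch (not in the original script): UnicodeWriter is a drop-in for
# csv.writer that accepts unicode rows, e.g.
# with open('demo.csv', 'wb') as f:
#     UnicodeWriter(f).writerow([u'namn', u'e-post'])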
# For some very weird reason, party can't contain utf-8 characters, but last_name can. Weird
party_pages = {
'Socialdemokraterna': 'http://www.riksdagen.se/sv/ledamoter-partier/Socialdemokraterna/Ledamoter/',
'Moderaterna': 'http://www.riksdagen.se/sv/ledamoter-partier/Moderata-samlingspartiet/Ledamoter/',
'Sverigedemokraterna': 'http://www.riksdagen.se/sv/ledamoter-partier/Sverigedemokraterna/Ledamoter/',
'Miljopartiet': 'http://www.riksdagen.se/sv/ledamoter-partier/Miljopartiet-de-grona/Ledamoter/',
'Centerpartiet': 'http://www.riksdagen.se/sv/ledamoter-partier/Centerpartiet/Ledamoter/',
'Vansterpartiet': 'http://www.riksdagen.se/sv/ledamoter-partier/Vansterpartiet/Ledamoter/',
'Liberalerna': 'http://www.riksdagen.se/sv/ledamoter-partier/Folkpartiet/Ledamoter/',
'Kristdemokraterna': 'http://www.riksdagen.se/sv/ledamoter-partier/Kristdemokraterna/Ledamoter/',
}
if __name__ == "__main__":
all_people = []
for party, party_page in party_pages.iteritems():
page = requests.get(party_page)
tree = html.fromstring(page.text)
# Only include "ledamöter", not "partisekreterare" and such since they don't have emails
names = tree.xpath("//*[contains(@class, 'large-12 columns alphabetical component-fellows-list')]//a[contains(@class, 'fellow-item-container')]/@href")
root = "http://www.riksdagen.se"
unique_name_list = []
for name in names:
full_url = root + name
if full_url not in unique_name_list:
unique_name_list.append(full_url)
print unique_name_list
print "unique:"
for name_url in unique_name_list:
print name_url
personal_page = requests.get(name_url)
personal_tree = html.fromstring(personal_page.text)
email_list = personal_tree.xpath("//*[contains(@class, 'scrambled-email')]/text()")
email_scrambled = email_list[0]
email = email_scrambled.replace(u'[på]', '@')
print email
name_list = personal_tree.xpath("//header/h1[contains(@class, 'biggest fellow-name')]/text()")
name = name_list[0]
name = name.replace("\n", "")
name = name.replace("\r", "")
name = name[:name.find("(")-1]
name = name.strip()
print name
print "-----"
person = Person(party, name, email)
all_people.append(person)
for person in all_people:
print person.party + ", " + person.name + ", " + person.email
with open('names.csv', 'wb') as csvfile:
fieldnames = ['name', 'email', 'party']
writer = UnicodeWriter(csvfile)
writer.writerow(fieldnames)
for person in all_people:
print person.party + ", " + person.name + ", " + person.email
writer.writerow([person.name, person.email, person.party])
| samuelskanberg/riksdagen-crawler | scraper.py | Python | unlicense | 4,168 |
import matplotlib.pyplot as plt
import numpy as np
n = 50
x = np.random.randn(n)
y = x * np.random.randn(n)
fig, ax = plt.subplots(2, figsize=(6, 6))
ax[0].scatter(x, y, s=50)
sizes = (np.random.randn(n) * 8) ** 2
ax[1].scatter(x, y, s=sizes)
fig.show()
"""(

)"""
| pythonpatterns/patterns | p0171.py | Python | unlicense | 323 |
from django.apps import AppConfig
class GroupInvitationsConfig(AppConfig):
name = 'GroupInvitations'
verbose_name = 'Group Invitations' | Yury191/brownstonetutors | GroupInvitations/apps.py | Python | unlicense | 144 |
import enum
class H264Trellis(enum.Enum):
DISABLED = 'DISABLED'
ENABLED_FINAL_MB = 'ENABLED_FINAL_MB'
ENABLED_ALL = 'ENABLED_ALL'
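# Usage sketch: members can be looked up by value,
# e.g. H264Trellis('ENABLED_ALL') is H264Trellis.ENABLED_ALL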
| bitmovin/bitmovin-python | bitmovin/resources/enums/h264_trellis.py | Python | unlicense | 144 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains test cases for the utils.py module."""
from __future__ import unicode_literals
import sys
import os.path
import unittest
PATH = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(PATH)))
try:
from youtube_dl_gui import utils
except ImportError as error:
print error
sys.exit(1)
class TestToBytes(unittest.TestCase):
"""Test case for the to_bytes method."""
def test_to_bytes_bytes(self):
self.assertEqual(utils.to_bytes("596.00B"), 596.00)
self.assertEqual(utils.to_bytes("133.55B"), 133.55)
def test_to_bytes_kilobytes(self):
self.assertEqual(utils.to_bytes("1.00KiB"), 1024.00)
self.assertEqual(utils.to_bytes("5.55KiB"), 5683.20)
def test_to_bytes_megabytes(self):
self.assertEqual(utils.to_bytes("13.64MiB"), 14302576.64)
self.assertEqual(utils.to_bytes("1.00MiB"), 1048576.00)
def test_to_bytes_gigabytes(self):
self.assertEqual(utils.to_bytes("1.00GiB"), 1073741824.00)
self.assertEqual(utils.to_bytes("1.55GiB"), 1664299827.20)
def test_to_bytes_terabytes(self):
self.assertEqual(utils.to_bytes("1.00TiB"), 1099511627776.00)
class TestFormatBytes(unittest.TestCase):
"""Test case for the format_bytes method."""
def test_format_bytes_bytes(self):
self.assertEqual(utils.format_bytes(518.00), "518.00B")
def test_format_bytes_kilobytes(self):
self.assertEqual(utils.format_bytes(1024.00), "1.00KiB")
def test_format_bytes_megabytes(self):
self.assertEqual(utils.format_bytes(1048576.00), "1.00MiB")
def test_format_bytes_gigabytes(self):
self.assertEqual(utils.format_bytes(1073741824.00), "1.00GiB")
def test_format_bytes_terabytes(self):
self.assertEqual(utils.format_bytes(1099511627776.00), "1.00TiB")
def main():
unittest.main()
if __name__ == "__main__":
main()
| Sofronio/youtube-dl-gui | tests/test_utils.py | Python | unlicense | 1,981 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Brand'
db.create_table(u'automotive_brand', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='name')),
))
db.send_create_signal(u'automotive', ['Brand'])
# Adding model 'Model'
db.create_table(u'automotive_model', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brand', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['automotive.Brand'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('year', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'automotive', ['Model'])
def backwards(self, orm):
# Deleting model 'Brand'
db.delete_table(u'automotive_brand')
# Deleting model 'Model'
db.delete_table(u'automotive_model')
models = {
u'automotive.brand': {
'Meta': {'object_name': 'Brand'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name'"})
},
u'automotive.model': {
'Meta': {'object_name': 'Model'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['automotive.Brand']"}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['automotive'] | mscam/django-automotive | automotive/south_migrations/0001_initial.py | Python | unlicense | 2,513 |
class MyClass:
'''This is the docstring for this class'''
def __init__(self):
# setup per-instance variables
self.x = 1
self.y = 2
self.z = 3
class MySecondClass:
'''This is the docstring for this second class'''
def __init__(self):
# setup per-instance variables
self.p = 1
self.d = 2
self.q = 3
| dagostinelli/python-package-boilerplate | packagename/somemodule.py | Python | unlicense | 382 |
#%matplotlib inline
# All the imports
from __future__ import print_function, division
from math import *
import random
import sys
import matplotlib.pyplot as plt
# TODO 1: Enter your unity ID here
__author__ = "magoff2"
class O:
"""
Basic Class which
- Helps dynamic updates
- Pretty Prints
"""
def __init__(self, **kwargs):
self.has().update(**kwargs)
def has(self):
return self.__dict__
def update(self, **kwargs):
self.has().update(kwargs)
return self
def __repr__(self):
show = [':%s %s' % (k, self.has()[k])
for k in sorted(self.has().keys())
if k[0] is not "_"]
txt = ' '.join(show)
if len(txt) > 60:
show = map(lambda x: '\t' + x + '\n', show)
return '{' + ' '.join(show) + '}'
print("Unity ID: ", __author__)
####################################################################
#SECTION 2
####################################################################
# Few Utility functions
def say(*lst):
"""
    Print without going to a new line
"""
print(*lst, end="")
sys.stdout.flush()
def random_value(low, high, decimals=2):
"""
Generate a random number between low and high.
    decimals indicates the number of decimal places
"""
return round(random.uniform(low, high),decimals)
def gt(a, b): return a > b
def lt(a, b): return a < b
def shuffle(lst):
"""
Shuffle a list
"""
random.shuffle(lst)
return lst
class Decision(O):
"""
Class indicating Decision of a problem
"""
def __init__(self, name, low, high):
"""
@param name: Name of the decision
@param low: minimum value
@param high: maximum value
"""
O.__init__(self, name=name, low=low, high=high)
class Objective(O):
"""
Class indicating Objective of a problem
"""
def __init__(self, name, do_minimize=True):
"""
@param name: Name of the objective
@param do_minimize: Flag indicating if objective has to be minimized or maximized
"""
O.__init__(self, name=name, do_minimize=do_minimize)
class Point(O):
"""
Represents a member of the population
"""
def __init__(self, decisions):
O.__init__(self)
self.decisions = decisions
self.objectives = None
def __hash__(self):
return hash(tuple(self.decisions))
def __eq__(self, other):
return self.decisions == other.decisions
def clone(self):
new = Point(self.decisions)
new.objectives = self.objectives
return new
class Problem(O):
"""
Class representing the cone problem.
"""
def __init__(self):
O.__init__(self)
# TODO 2: Code up decisions and objectives below for the problem
# using the auxilary classes provided above.
self.decisions = [Decision('r', 0, 10), Decision('h', 0, 20)]
self.objectives = [Objective('S'), Objective('T')]
@staticmethod
def evaluate(point):
[r, h] = point.decisions
l = (r**2 + h**2)**0.5
S = pi * r * l
T = S + pi * r**2
point.objectives = [S, T]
# TODO 3: Evaluate the objectives S and T for the point.
return point.objectives
@staticmethod
def is_valid(point):
[r, h] = point.decisions
# TODO 4: Check if the point has valid decisions
V = pi / 3 * (r**2) * h
return V > 200
def generate_one(self):
# TODO 5: Generate a valid instance of Point.
while(True):
point = Point([random_value(d.low, d.high) for d in self.decisions])
if Problem.is_valid(point):
return point
cone = Problem()
point = cone.generate_one()
cone.evaluate(point)
print (point)
def populate(problem, size):
population = []
# TODO 6: Create a list of points of length 'size'
for _ in xrange(size):
population.append(problem.generate_one())
return population
# or equivalently:
# return [problem.generate_one() for _ in xrange(size)]
pop = populate(cone,5)
print(pop)
def crossover(mom, dad):
# TODO 7: Create a new point which contains decisions from
# the first half of mom and second half of dad
n = len(mom.decisions)
return Point(mom.decisions[:n//2] + dad.decisions[n//2:])
mom = cone.generate_one()
dad = cone.generate_one()
print(mom)
print(dad)
print(crossover(mom,dad))
print(crossover(pop[0],pop[1]))
def mutate(problem, point, mutation_rate=0.01):
# TODO 8: Iterate through all the decisions in the point
# and if the probability is less than mutation rate
# change the decision(randomly set it between its max and min).
for i, d in enumerate(problem.decisions):
if(random.random() < mutation_rate) :
point.decisions[i] = random_value(d.low, d.high)
return point
def bdom(problem, one, two):
"""
Return if one dominates two
"""
objs_one = problem.evaluate(one)
objs_two = problem.evaluate(two)
    if one == two:
        return False
    dominates = False
    # TODO 9: Return True/False based on the definition
    # of bdom above.
    first = True    # stays True while 'one' is never worse than 'two'
    second = False  # becomes True once the objectives differ somewhere
    for i, _ in enumerate(problem.objectives):
        if first and gt(one.objectives[i], two.objectives[i]):
            first = False
        elif not second and one.objectives[i] != two.objectives[i]:
            second = True
    dominates = first and second
return dominates
print(bdom(cone,pop[0],pop[1]))
def fitness(problem, population, point):
dominates = 0
# TODO 10: Evaluate fitness of a point.
# For this workshop define fitness of a point
# as the number of points dominated by it.
# For example point dominates 5 members of population,
# then fitness of point is 5.
for another in population:
if bdom(problem, point, another):
dominates += 1
return dominates
''' bdom returns False for identical points, so fitness never counts the point itself. '''
print(fitness(cone, pop, pop[0]))
print('HELLO WORLD\n')
def elitism(problem, population, retain_size):
# TODO 11: Sort the population with respect to the fitness
# of the points and return the top 'retain_size' points of the population
    fit_pop = [fitness(problem, population, point) for point in population]
    population = [point for _, point in sorted(zip(fit_pop, population), key=lambda pair: pair[0], reverse=True)]
return population[:retain_size]
print(elitism(cone, pop, 3))
def ga(pop_size=100, gens=250):
problem = Problem()
population = populate(problem, pop_size)
[problem.evaluate(point) for point in population]
initial_population = [point.clone() for point in population]
gen = 0
while gen < gens:
say(".")
children = []
for _ in range(pop_size):
mom = random.choice(population)
dad = random.choice(population)
while (mom == dad):
dad = random.choice(population)
child = mutate(problem, crossover(mom, dad))
if problem.is_valid(child) and child not in population + children:
children.append(child)
population += children
population = elitism(problem, population, pop_size)
gen += 1
print("")
return initial_population, population
def plot_pareto(initial, final):
initial_objs = [point.objectives for point in initial]
final_objs = [point.objectives for point in final]
initial_x = [i[0] for i in initial_objs]
initial_y = [i[1] for i in initial_objs]
final_x = [i[0] for i in final_objs]
final_y = [i[1] for i in final_objs]
plt.scatter(initial_x, initial_y, color='b', marker='+', label='initial')
plt.scatter(final_x, final_y, color='r', marker='o', label='final')
plt.title("Scatter Plot between initial and final population of GA")
plt.ylabel("Total Surface Area(T)")
plt.xlabel("Curved Surface Area(S)")
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.175), ncol=2)
plt.show()
initial, final = ga()
plot_pareto(initial, final)
| gbtimmon/ase16GBT | code/4/ga/magoff2.py | Python | unlicense | 8,088 |
"""
You get an array of numbers, return the sum of all of the positives ones.
Example [1,-4,7,12] => 1 + 7 + 12 = 20
Note: array may be empty, in this case return 0.
"""
def positive_sum(arr):
# Your code here
sum = 0
for number in arr:
if number > 0:
sum += number
return sum
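# A stylistically equivalent one-liner would be:
# def positive_sum(arr):
#     return sum(n for n in arr if n > 0)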
| aadithpm/code-a-day | py/Sum Of Positive.py | Python | unlicense | 316 |
from __future__ import unicode_literals
from django.apps import AppConfig
class RewardsConfig(AppConfig):
name = 'rewards'
| bfrick22/monetary-rewards | mysite/rewards/apps.py | Python | unlicense | 130 |
"""
Python bindings for GLFW.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = 'Florian Rhiem ([email protected])'
__copyright__ = 'Copyright (c) 2013-2016 Florian Rhiem'
__license__ = 'MIT'
__version__ = '1.3.1'
# By default (ERROR_REPORTING = True), GLFW errors will be reported as Python
# exceptions. Set ERROR_REPORTING to False or set a custom error callback to
# disable this behavior.
ERROR_REPORTING = True
import ctypes
import os
import functools
import glob
import sys
import subprocess
import textwrap
# Python 3 compatibility:
try:
_getcwd = os.getcwdu
except AttributeError:
_getcwd = os.getcwd
if sys.version_info.major > 2:
_to_char_p = lambda s: s.encode('utf-8')
def _reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
_to_char_p = lambda s: s
    def _reraise(exception, traceback):
        # wrapped in exec, as the three-argument raise is a syntax error in
        # Python 3 (and raising a parenthesized tuple would drop the traceback)
        exec("raise exception, None, traceback")
class GLFWError(Exception):
"""
Exception class used for reporting GLFW errors.
"""
def __init__(self, message):
super(GLFWError, self).__init__(message)
def _find_library_candidates(library_names,
library_file_extensions,
library_search_paths):
"""
Finds and returns filenames which might be the library you are looking for.
"""
candidates = set()
for library_name in library_names:
for search_path in library_search_paths:
glob_query = os.path.join(search_path, '*'+library_name+'*')
for filename in glob.iglob(glob_query):
filename = os.path.realpath(filename)
if filename in candidates:
continue
basename = os.path.basename(filename)
if basename.startswith('lib'+library_name):
basename_end = basename[len('lib'+library_name):]
elif basename.startswith(library_name):
basename_end = basename[len(library_name):]
else:
continue
for file_extension in library_file_extensions:
if basename_end.startswith(file_extension):
if basename_end[len(file_extension):][:1] in ('', '.'):
candidates.add(filename)
if basename_end.endswith(file_extension):
basename_middle = basename_end[:-len(file_extension)]
if all(c in '0123456789.' for c in basename_middle):
candidates.add(filename)
return candidates
def _load_library(library_names, library_file_extensions,
library_search_paths, version_check_callback):
"""
Finds, loads and returns the most recent version of the library.
"""
candidates = _find_library_candidates(library_names,
library_file_extensions,
library_search_paths)
library_versions = []
for filename in candidates:
version = version_check_callback(filename)
if version is not None and version >= (3, 0, 0):
library_versions.append((version, filename))
if not library_versions:
return None
library_versions.sort()
return ctypes.CDLL(library_versions[-1][1])
def _glfw_get_version(filename):
"""
Queries and returns the library version tuple or None by using a
subprocess.
"""
version_checker_source = '''
import sys
import ctypes
def get_version(library_handle):
"""
Queries and returns the library version tuple or None.
"""
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
if hasattr(library_handle, 'glfwGetVersion'):
library_handle.glfwGetVersion(major, minor, rev)
version = (major_value.value,
minor_value.value,
rev_value.value)
return version
else:
return None
try:
input_func = raw_input
except NameError:
input_func = input
filename = input_func().strip()
try:
library_handle = ctypes.CDLL(filename)
except OSError:
pass
else:
version = get_version(library_handle)
print(version)
'''
args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
process = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = process.communicate(filename)[0]
out = out.strip()
if out:
return eval(out)
else:
return None
if sys.platform == 'win32':
# only try glfw3.dll on windows
try:
_glfw = ctypes.CDLL('glfw3.dll')
except OSError:
_glfw = None
else:
_glfw = _load_library(['glfw', 'glfw3'], ['.so', '.dylib'],
['',
'/usr/lib64', '/usr/local/lib64',
'/usr/lib', '/usr/local/lib',
'/usr/lib/x86_64-linux-gnu/'], _glfw_get_version)
if _glfw is None:
raise ImportError("Failed to load GLFW3 shared library.")
_callback_repositories = []
class _GLFWwindow(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWwindow GLFWwindow;
"""
_fields_ = [("dummy", ctypes.c_int)]
class _GLFWmonitor(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWmonitor GLFWmonitor;
"""
_fields_ = [("dummy", ctypes.c_int)]
class _GLFWvidmode(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWvidmode GLFWvidmode;
"""
_fields_ = [("width", ctypes.c_int),
("height", ctypes.c_int),
("red_bits", ctypes.c_int),
("green_bits", ctypes.c_int),
("blue_bits", ctypes.c_int),
("refresh_rate", ctypes.c_uint)]
def __init__(self):
ctypes.Structure.__init__(self)
self.width = 0
self.height = 0
self.red_bits = 0
self.green_bits = 0
self.blue_bits = 0
self.refresh_rate = 0
def wrap(self, video_mode):
"""
Wraps a nested python sequence.
"""
size, bits, self.refresh_rate = video_mode
self.width, self.height = size
self.red_bits, self.green_bits, self.blue_bits = bits
def unwrap(self):
"""
Returns a nested python sequence.
"""
size = self.width, self.height
bits = self.red_bits, self.green_bits, self.blue_bits
return size, bits, self.refresh_rate
class _GLFWgammaramp(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWgammaramp GLFWgammaramp;
"""
_fields_ = [("red", ctypes.POINTER(ctypes.c_ushort)),
("green", ctypes.POINTER(ctypes.c_ushort)),
("blue", ctypes.POINTER(ctypes.c_ushort)),
("size", ctypes.c_uint)]
def __init__(self):
ctypes.Structure.__init__(self)
self.red = None
self.red_array = None
self.green = None
self.green_array = None
self.blue = None
self.blue_array = None
self.size = 0
def wrap(self, gammaramp):
"""
Wraps a nested python sequence.
"""
red, green, blue = gammaramp
size = min(len(red), len(green), len(blue))
array_type = ctypes.c_ushort*size
self.size = ctypes.c_uint(size)
self.red_array = array_type()
self.green_array = array_type()
self.blue_array = array_type()
for i in range(self.size):
self.red_array[i] = int(red[i]*65535)
self.green_array[i] = int(green[i]*65535)
self.blue_array[i] = int(blue[i]*65535)
pointer_type = ctypes.POINTER(ctypes.c_ushort)
self.red = ctypes.cast(self.red_array, pointer_type)
self.green = ctypes.cast(self.green_array, pointer_type)
self.blue = ctypes.cast(self.blue_array, pointer_type)
def unwrap(self):
"""
Returns a nested python sequence.
"""
red = [self.red[i]/65535.0 for i in range(self.size)]
green = [self.green[i]/65535.0 for i in range(self.size)]
blue = [self.blue[i]/65535.0 for i in range(self.size)]
return red, green, blue
class _GLFWcursor(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWcursor GLFWcursor;
"""
_fields_ = [("dummy", ctypes.c_int)]
class _GLFWimage(ctypes.Structure):
"""
Wrapper for:
typedef struct GLFWimage GLFWimage;
"""
_fields_ = [("width", ctypes.c_int),
("height", ctypes.c_int),
("pixels", ctypes.POINTER(ctypes.c_ubyte))]
def __init__(self):
ctypes.Structure.__init__(self)
self.width = 0
self.height = 0
self.pixels = None
self.pixels_array = None
def wrap(self, image):
"""
Wraps a nested python sequence.
"""
self.width, self.height, pixels = image
array_type = ctypes.c_ubyte * 4 * self.width * self.height
self.pixels_array = array_type()
for i in range(self.height):
for j in range(self.width):
for k in range(4):
self.pixels_array[i][j][k] = pixels[i][j][k]
pointer_type = ctypes.POINTER(ctypes.c_ubyte)
self.pixels = ctypes.cast(self.pixels_array, pointer_type)
def unwrap(self):
"""
Returns a nested python sequence.
"""
pixels = [[[int(c) for c in p] for p in l] for l in self.pixels_array]
return self.width, self.height, pixels
VERSION_MAJOR = 3
VERSION_MINOR = 2
VERSION_REVISION = 1
RELEASE = 0
PRESS = 1
REPEAT = 2
KEY_UNKNOWN = -1
KEY_SPACE = 32
KEY_APOSTROPHE = 39
KEY_COMMA = 44
KEY_MINUS = 45
KEY_PERIOD = 46
KEY_SLASH = 47
KEY_0 = 48
KEY_1 = 49
KEY_2 = 50
KEY_3 = 51
KEY_4 = 52
KEY_5 = 53
KEY_6 = 54
KEY_7 = 55
KEY_8 = 56
KEY_9 = 57
KEY_SEMICOLON = 59
KEY_EQUAL = 61
KEY_A = 65
KEY_B = 66
KEY_C = 67
KEY_D = 68
KEY_E = 69
KEY_F = 70
KEY_G = 71
KEY_H = 72
KEY_I = 73
KEY_J = 74
KEY_K = 75
KEY_L = 76
KEY_M = 77
KEY_N = 78
KEY_O = 79
KEY_P = 80
KEY_Q = 81
KEY_R = 82
KEY_S = 83
KEY_T = 84
KEY_U = 85
KEY_V = 86
KEY_W = 87
KEY_X = 88
KEY_Y = 89
KEY_Z = 90
KEY_LEFT_BRACKET = 91
KEY_BACKSLASH = 92
KEY_RIGHT_BRACKET = 93
KEY_GRAVE_ACCENT = 96
KEY_WORLD_1 = 161
KEY_WORLD_2 = 162
KEY_ESCAPE = 256
KEY_ENTER = 257
KEY_TAB = 258
KEY_BACKSPACE = 259
KEY_INSERT = 260
KEY_DELETE = 261
KEY_RIGHT = 262
KEY_LEFT = 263
KEY_DOWN = 264
KEY_UP = 265
KEY_PAGE_UP = 266
KEY_PAGE_DOWN = 267
KEY_HOME = 268
KEY_END = 269
KEY_CAPS_LOCK = 280
KEY_SCROLL_LOCK = 281
KEY_NUM_LOCK = 282
KEY_PRINT_SCREEN = 283
KEY_PAUSE = 284
KEY_F1 = 290
KEY_F2 = 291
KEY_F3 = 292
KEY_F4 = 293
KEY_F5 = 294
KEY_F6 = 295
KEY_F7 = 296
KEY_F8 = 297
KEY_F9 = 298
KEY_F10 = 299
KEY_F11 = 300
KEY_F12 = 301
KEY_F13 = 302
KEY_F14 = 303
KEY_F15 = 304
KEY_F16 = 305
KEY_F17 = 306
KEY_F18 = 307
KEY_F19 = 308
KEY_F20 = 309
KEY_F21 = 310
KEY_F22 = 311
KEY_F23 = 312
KEY_F24 = 313
KEY_F25 = 314
KEY_KP_0 = 320
KEY_KP_1 = 321
KEY_KP_2 = 322
KEY_KP_3 = 323
KEY_KP_4 = 324
KEY_KP_5 = 325
KEY_KP_6 = 326
KEY_KP_7 = 327
KEY_KP_8 = 328
KEY_KP_9 = 329
KEY_KP_DECIMAL = 330
KEY_KP_DIVIDE = 331
KEY_KP_MULTIPLY = 332
KEY_KP_SUBTRACT = 333
KEY_KP_ADD = 334
KEY_KP_ENTER = 335
KEY_KP_EQUAL = 336
KEY_LEFT_SHIFT = 340
KEY_LEFT_CONTROL = 341
KEY_LEFT_ALT = 342
KEY_LEFT_SUPER = 343
KEY_RIGHT_SHIFT = 344
KEY_RIGHT_CONTROL = 345
KEY_RIGHT_ALT = 346
KEY_RIGHT_SUPER = 347
KEY_MENU = 348
KEY_LAST = KEY_MENU
MOD_SHIFT = 0x0001
MOD_CONTROL = 0x0002
MOD_ALT = 0x0004
MOD_SUPER = 0x0008
MOUSE_BUTTON_1 = 0
MOUSE_BUTTON_2 = 1
MOUSE_BUTTON_3 = 2
MOUSE_BUTTON_4 = 3
MOUSE_BUTTON_5 = 4
MOUSE_BUTTON_6 = 5
MOUSE_BUTTON_7 = 6
MOUSE_BUTTON_8 = 7
MOUSE_BUTTON_LAST = MOUSE_BUTTON_8
MOUSE_BUTTON_LEFT = MOUSE_BUTTON_1
MOUSE_BUTTON_RIGHT = MOUSE_BUTTON_2
MOUSE_BUTTON_MIDDLE = MOUSE_BUTTON_3
JOYSTICK_1 = 0
JOYSTICK_2 = 1
JOYSTICK_3 = 2
JOYSTICK_4 = 3
JOYSTICK_5 = 4
JOYSTICK_6 = 5
JOYSTICK_7 = 6
JOYSTICK_8 = 7
JOYSTICK_9 = 8
JOYSTICK_10 = 9
JOYSTICK_11 = 10
JOYSTICK_12 = 11
JOYSTICK_13 = 12
JOYSTICK_14 = 13
JOYSTICK_15 = 14
JOYSTICK_16 = 15
JOYSTICK_LAST = JOYSTICK_16
NOT_INITIALIZED = 0x00010001
NO_CURRENT_CONTEXT = 0x00010002
INVALID_ENUM = 0x00010003
INVALID_VALUE = 0x00010004
OUT_OF_MEMORY = 0x00010005
API_UNAVAILABLE = 0x00010006
VERSION_UNAVAILABLE = 0x00010007
PLATFORM_ERROR = 0x00010008
FORMAT_UNAVAILABLE = 0x00010009
NO_WINDOW_CONTEXT = 0x0001000A
FOCUSED = 0x00020001
ICONIFIED = 0x00020002
RESIZABLE = 0x00020003
VISIBLE = 0x00020004
DECORATED = 0x00020005
AUTO_ICONIFY = 0x00020006
FLOATING = 0x00020007
MAXIMIZED = 0x00020008
RED_BITS = 0x00021001
GREEN_BITS = 0x00021002
BLUE_BITS = 0x00021003
ALPHA_BITS = 0x00021004
DEPTH_BITS = 0x00021005
STENCIL_BITS = 0x00021006
ACCUM_RED_BITS = 0x00021007
ACCUM_GREEN_BITS = 0x00021008
ACCUM_BLUE_BITS = 0x00021009
ACCUM_ALPHA_BITS = 0x0002100A
AUX_BUFFERS = 0x0002100B
STEREO = 0x0002100C
SAMPLES = 0x0002100D
SRGB_CAPABLE = 0x0002100E
REFRESH_RATE = 0x0002100F
DOUBLEBUFFER = 0x00021010
CLIENT_API = 0x00022001
CONTEXT_VERSION_MAJOR = 0x00022002
CONTEXT_VERSION_MINOR = 0x00022003
CONTEXT_REVISION = 0x00022004
CONTEXT_ROBUSTNESS = 0x00022005
OPENGL_FORWARD_COMPAT = 0x00022006
OPENGL_DEBUG_CONTEXT = 0x00022007
OPENGL_PROFILE = 0x00022008
CONTEXT_RELEASE_BEHAVIOR = 0x00022009
CONTEXT_NO_ERROR = 0x0002200A
CONTEXT_CREATION_API = 0x0002200B
NO_API = 0
OPENGL_API = 0x00030001
OPENGL_ES_API = 0x00030002
NO_ROBUSTNESS = 0
NO_RESET_NOTIFICATION = 0x00031001
LOSE_CONTEXT_ON_RESET = 0x00031002
OPENGL_ANY_PROFILE = 0
OPENGL_CORE_PROFILE = 0x00032001
OPENGL_COMPAT_PROFILE = 0x00032002
CURSOR = 0x00033001
STICKY_KEYS = 0x00033002
STICKY_MOUSE_BUTTONS = 0x00033003
CURSOR_NORMAL = 0x00034001
CURSOR_HIDDEN = 0x00034002
CURSOR_DISABLED = 0x00034003
ANY_RELEASE_BEHAVIOR = 0
RELEASE_BEHAVIOR_FLUSH = 0x00035001
RELEASE_BEHAVIOR_NONE = 0x00035002
NATIVE_CONTEXT_API = 0x00036001
EGL_CONTEXT_API = 0x00036002
ARROW_CURSOR = 0x00036001
IBEAM_CURSOR = 0x00036002
CROSSHAIR_CURSOR = 0x00036003
HAND_CURSOR = 0x00036004
HRESIZE_CURSOR = 0x00036005
VRESIZE_CURSOR = 0x00036006
CONNECTED = 0x00040001
DISCONNECTED = 0x00040002
DONT_CARE = -1
_exc_info_from_callback = None
def _callback_exception_decorator(func):
@functools.wraps(func)
def callback_wrapper(*args, **kwargs):
global _exc_info_from_callback
if _exc_info_from_callback is not None:
# We are on the way back to Python after an exception was raised.
# Do not call further callbacks and wait for the errcheck function
# to handle the exception first.
return
try:
return func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
_exc_info_from_callback = sys.exc_info()
return callback_wrapper
def _prepare_errcheck():
"""
This function sets the errcheck attribute of all ctypes wrapped functions
to evaluate the _exc_info_from_callback global variable and re-raise any
exceptions that might have been raised in callbacks.
It also modifies all callback types to automatically wrap the function
using the _callback_exception_decorator.
"""
def errcheck(result, *args):
global _exc_info_from_callback
if _exc_info_from_callback is not None:
exc = _exc_info_from_callback
_exc_info_from_callback = None
_reraise(exc[1], exc[2])
return result
for symbol in dir(_glfw):
if symbol.startswith('glfw'):
getattr(_glfw, symbol).errcheck = errcheck
_globals = globals()
for symbol in _globals:
if symbol.startswith('_GLFW') and symbol.endswith('fun'):
def wrapper_cfunctype(func, cfunctype=_globals[symbol]):
return cfunctype(_callback_exception_decorator(func))
_globals[symbol] = wrapper_cfunctype
_GLFWerrorfun = ctypes.CFUNCTYPE(None,
ctypes.c_int,
ctypes.c_char_p)
_GLFWwindowposfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int)
_GLFWwindowsizefun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int)
_GLFWwindowclosefun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow))
_GLFWwindowrefreshfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow))
_GLFWwindowfocusfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int)
_GLFWwindowiconifyfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int)
_GLFWframebuffersizefun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int)
_GLFWmousebuttonfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int)
_GLFWcursorposfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_double,
ctypes.c_double)
_GLFWcursorenterfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int)
_GLFWscrollfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_double,
ctypes.c_double)
_GLFWkeyfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int)
_GLFWcharfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int)
_GLFWmonitorfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWmonitor),
ctypes.c_int)
_GLFWdropfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.POINTER(ctypes.c_char_p))
_GLFWcharmodsfun = ctypes.CFUNCTYPE(None,
ctypes.POINTER(_GLFWwindow),
ctypes.c_uint,
ctypes.c_int)
_GLFWjoystickfun = ctypes.CFUNCTYPE(None,
ctypes.c_int,
ctypes.c_int)
_glfw.glfwInit.restype = ctypes.c_int
_glfw.glfwInit.argtypes = []
def init():
"""
Initializes the GLFW library.
Wrapper for:
int glfwInit(void);
"""
cwd = _getcwd()
res = _glfw.glfwInit()
os.chdir(cwd)
return res
_glfw.glfwTerminate.restype = None
_glfw.glfwTerminate.argtypes = []
def terminate():
"""
Terminates the GLFW library.
Wrapper for:
void glfwTerminate(void);
"""
for callback_repository in _callback_repositories:
for window_addr in list(callback_repository.keys()):
del callback_repository[window_addr]
for window_addr in list(_window_user_data_repository.keys()):
del _window_user_data_repository[window_addr]
_glfw.glfwTerminate()
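# Minimal usage sketch (not part of the bindings; the window title is an
# assumption). make_context_current, swap_buffers and poll_events are wrapped
# further down in this module:
# if not init():
#     raise GLFWError('failed to initialize GLFW')
# window = create_window(640, 480, 'Demo', None, None)
# make_context_current(window)
# while not window_should_close(window):
#     swap_buffers(window)
#     poll_events()
# terminate()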
_glfw.glfwGetVersion.restype = None
_glfw.glfwGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_version():
"""
Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev);
"""
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
_glfw.glfwGetVersion(major, minor, rev)
return major_value.value, minor_value.value, rev_value.value
_glfw.glfwGetVersionString.restype = ctypes.c_char_p
_glfw.glfwGetVersionString.argtypes = []
def get_version_string():
"""
Returns a string describing the compile-time configuration.
Wrapper for:
const char* glfwGetVersionString(void);
"""
return _glfw.glfwGetVersionString()
@_callback_exception_decorator
def _raise_glfw_errors_as_exceptions(error_code, description):
"""
Default error callback that raises GLFWError exceptions for glfw errors.
Set an alternative error callback or set glfw.ERROR_REPORTING to False to
disable this behavior.
"""
global ERROR_REPORTING
if ERROR_REPORTING:
message = "(%d) %s" % (error_code, description)
raise GLFWError(message)
_default_error_callback = _GLFWerrorfun(_raise_glfw_errors_as_exceptions)
_error_callback = (_raise_glfw_errors_as_exceptions, _default_error_callback)
_glfw.glfwSetErrorCallback.restype = _GLFWerrorfun
_glfw.glfwSetErrorCallback.argtypes = [_GLFWerrorfun]
_glfw.glfwSetErrorCallback(_default_error_callback)
def set_error_callback(cbfun):
"""
Sets the error callback.
Wrapper for:
GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
"""
global _error_callback
previous_callback = _error_callback
if cbfun is None:
cbfun = _raise_glfw_errors_as_exceptions
c_cbfun = _default_error_callback
else:
c_cbfun = _GLFWerrorfun(cbfun)
_error_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetErrorCallback(cbfun)
if previous_callback is not None and previous_callback[0] != _raise_glfw_errors_as_exceptions:
return previous_callback[0]
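# Sketch (the handler name is illustrative): route errors to a custom callback
# instead of raising GLFWError:
# def on_error(error_code, description):
#     print('GLFW error %d: %s' % (error_code, description))
# set_error_callback(on_error)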
_glfw.glfwGetMonitors.restype = ctypes.POINTER(ctypes.POINTER(_GLFWmonitor))
_glfw.glfwGetMonitors.argtypes = [ctypes.POINTER(ctypes.c_int)]
def get_monitors():
"""
Returns the currently connected monitors.
Wrapper for:
GLFWmonitor** glfwGetMonitors(int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetMonitors(count)
monitors = [result[i] for i in range(count_value.value)]
return monitors
_glfw.glfwGetPrimaryMonitor.restype = ctypes.POINTER(_GLFWmonitor)
_glfw.glfwGetPrimaryMonitor.argtypes = []
def get_primary_monitor():
"""
Returns the primary monitor.
Wrapper for:
GLFWmonitor* glfwGetPrimaryMonitor(void);
"""
return _glfw.glfwGetPrimaryMonitor()
_glfw.glfwGetMonitorPos.restype = None
_glfw.glfwGetMonitorPos.argtypes = [ctypes.POINTER(_GLFWmonitor),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_monitor_pos(monitor):
"""
Returns the position of the monitor's viewport on the virtual screen.
Wrapper for:
void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos);
"""
xpos_value = ctypes.c_int(0)
xpos = ctypes.pointer(xpos_value)
ypos_value = ctypes.c_int(0)
ypos = ctypes.pointer(ypos_value)
_glfw.glfwGetMonitorPos(monitor, xpos, ypos)
return xpos_value.value, ypos_value.value
_glfw.glfwGetMonitorPhysicalSize.restype = None
_glfw.glfwGetMonitorPhysicalSize.argtypes = [ctypes.POINTER(_GLFWmonitor),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_monitor_physical_size(monitor):
"""
Returns the physical size of the monitor.
Wrapper for:
void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetMonitorPhysicalSize(monitor, width, height)
return width_value.value, height_value.value
_glfw.glfwGetMonitorName.restype = ctypes.c_char_p
_glfw.glfwGetMonitorName.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_monitor_name(monitor):
"""
Returns the name of the specified monitor.
Wrapper for:
const char* glfwGetMonitorName(GLFWmonitor* monitor);
"""
return _glfw.glfwGetMonitorName(monitor)
_monitor_callback = None
_glfw.glfwSetMonitorCallback.restype = _GLFWmonitorfun
_glfw.glfwSetMonitorCallback.argtypes = [_GLFWmonitorfun]
def set_monitor_callback(cbfun):
"""
Sets the monitor configuration callback.
Wrapper for:
GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun cbfun);
"""
global _monitor_callback
previous_callback = _monitor_callback
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWmonitorfun(cbfun)
_monitor_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetMonitorCallback(cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_glfw.glfwGetVideoModes.restype = ctypes.POINTER(_GLFWvidmode)
_glfw.glfwGetVideoModes.argtypes = [ctypes.POINTER(_GLFWmonitor),
ctypes.POINTER(ctypes.c_int)]
def get_video_modes(monitor):
"""
Returns the available video modes for the specified monitor.
Wrapper for:
const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetVideoModes(monitor, count)
videomodes = [result[i].unwrap() for i in range(count_value.value)]
return videomodes
_glfw.glfwGetVideoMode.restype = ctypes.POINTER(_GLFWvidmode)
_glfw.glfwGetVideoMode.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_video_mode(monitor):
"""
Returns the current mode of the specified monitor.
Wrapper for:
const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor);
"""
videomode = _glfw.glfwGetVideoMode(monitor).contents
return videomode.unwrap()
_glfw.glfwSetGamma.restype = None
_glfw.glfwSetGamma.argtypes = [ctypes.POINTER(_GLFWmonitor),
ctypes.c_float]
def set_gamma(monitor, gamma):
"""
Generates a gamma ramp and sets it for the specified monitor.
Wrapper for:
void glfwSetGamma(GLFWmonitor* monitor, float gamma);
"""
_glfw.glfwSetGamma(monitor, gamma)
_glfw.glfwGetGammaRamp.restype = ctypes.POINTER(_GLFWgammaramp)
_glfw.glfwGetGammaRamp.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_gamma_ramp(monitor):
"""
Retrieves the current gamma ramp for the specified monitor.
Wrapper for:
const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor);
"""
gammaramp = _glfw.glfwGetGammaRamp(monitor).contents
return gammaramp.unwrap()
_glfw.glfwSetGammaRamp.restype = None
_glfw.glfwSetGammaRamp.argtypes = [ctypes.POINTER(_GLFWmonitor),
ctypes.POINTER(_GLFWgammaramp)]
def set_gamma_ramp(monitor, ramp):
"""
Sets the current gamma ramp for the specified monitor.
Wrapper for:
void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp);
"""
gammaramp = _GLFWgammaramp()
gammaramp.wrap(ramp)
_glfw.glfwSetGammaRamp(monitor, ctypes.pointer(gammaramp))
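# Sketch: a ramp is three equal-length sequences of floats in [0, 1]
# (red, green, blue), matching _GLFWgammaramp.wrap; e.g. a 256-step identity ramp:
# identity = [i / 255.0 for i in range(256)]
# set_gamma_ramp(monitor, (identity, identity, identity))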
_glfw.glfwDefaultWindowHints.restype = None
_glfw.glfwDefaultWindowHints.argtypes = []
def default_window_hints():
"""
Resets all window hints to their default values.
Wrapper for:
void glfwDefaultWindowHints(void);
"""
_glfw.glfwDefaultWindowHints()
_glfw.glfwWindowHint.restype = None
_glfw.glfwWindowHint.argtypes = [ctypes.c_int,
ctypes.c_int]
def window_hint(target, hint):
"""
Sets the specified window hint to the desired value.
Wrapper for:
void glfwWindowHint(int target, int hint);
"""
_glfw.glfwWindowHint(target, hint)
_glfw.glfwCreateWindow.restype = ctypes.POINTER(_GLFWwindow)
_glfw.glfwCreateWindow.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_char_p,
ctypes.POINTER(_GLFWmonitor),
ctypes.POINTER(_GLFWwindow)]
def create_window(width, height, title, monitor, share):
"""
Creates a window and its associated context.
Wrapper for:
GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share);
"""
return _glfw.glfwCreateWindow(width, height, _to_char_p(title),
monitor, share)
_glfw.glfwDestroyWindow.restype = None
_glfw.glfwDestroyWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def destroy_window(window):
"""
Destroys the specified window and its context.
Wrapper for:
void glfwDestroyWindow(GLFWwindow* window);
"""
_glfw.glfwDestroyWindow(window)
    # cast to c_long so the address matches the keys stored by the callback
    # setters below (which also use c_long)
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
for callback_repository in _callback_repositories:
if window_addr in callback_repository:
del callback_repository[window_addr]
if window_addr in _window_user_data_repository:
del _window_user_data_repository[window_addr]
_glfw.glfwWindowShouldClose.restype = ctypes.c_int
_glfw.glfwWindowShouldClose.argtypes = [ctypes.POINTER(_GLFWwindow)]
def window_should_close(window):
"""
Checks the close flag of the specified window.
Wrapper for:
int glfwWindowShouldClose(GLFWwindow* window);
"""
return _glfw.glfwWindowShouldClose(window)
_glfw.glfwSetWindowShouldClose.restype = None
_glfw.glfwSetWindowShouldClose.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int]
def set_window_should_close(window, value):
"""
Sets the close flag of the specified window.
Wrapper for:
void glfwSetWindowShouldClose(GLFWwindow* window, int value);
"""
_glfw.glfwSetWindowShouldClose(window, value)
_glfw.glfwSetWindowTitle.restype = None
_glfw.glfwSetWindowTitle.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_char_p]
def set_window_title(window, title):
"""
Sets the title of the specified window.
Wrapper for:
void glfwSetWindowTitle(GLFWwindow* window, const char* title);
"""
_glfw.glfwSetWindowTitle(window, _to_char_p(title))
_glfw.glfwGetWindowPos.restype = None
_glfw.glfwGetWindowPos.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_window_pos(window):
"""
Retrieves the position of the client area of the specified window.
Wrapper for:
void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos);
"""
xpos_value = ctypes.c_int(0)
xpos = ctypes.pointer(xpos_value)
ypos_value = ctypes.c_int(0)
ypos = ctypes.pointer(ypos_value)
_glfw.glfwGetWindowPos(window, xpos, ypos)
return xpos_value.value, ypos_value.value
_glfw.glfwSetWindowPos.restype = None
_glfw.glfwSetWindowPos.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int]
def set_window_pos(window, xpos, ypos):
"""
Sets the position of the client area of the specified window.
Wrapper for:
void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos);
"""
_glfw.glfwSetWindowPos(window, xpos, ypos)
_glfw.glfwGetWindowSize.restype = None
_glfw.glfwGetWindowSize.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_window_size(window):
"""
Retrieves the size of the client area of the specified window.
Wrapper for:
void glfwGetWindowSize(GLFWwindow* window, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetWindowSize(window, width, height)
return width_value.value, height_value.value
_glfw.glfwSetWindowSize.restype = None
_glfw.glfwSetWindowSize.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int]
def set_window_size(window, width, height):
"""
Sets the size of the client area of the specified window.
Wrapper for:
void glfwSetWindowSize(GLFWwindow* window, int width, int height);
"""
_glfw.glfwSetWindowSize(window, width, height)
_glfw.glfwGetFramebufferSize.restype = None
_glfw.glfwGetFramebufferSize.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
def get_framebuffer_size(window):
"""
Retrieves the size of the framebuffer of the specified window.
Wrapper for:
void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetFramebufferSize(window, width, height)
return width_value.value, height_value.value
_glfw.glfwIconifyWindow.restype = None
_glfw.glfwIconifyWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def iconify_window(window):
"""
Iconifies the specified window.
Wrapper for:
void glfwIconifyWindow(GLFWwindow* window);
"""
_glfw.glfwIconifyWindow(window)
_glfw.glfwRestoreWindow.restype = None
_glfw.glfwRestoreWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def restore_window(window):
"""
Restores the specified window.
Wrapper for:
void glfwRestoreWindow(GLFWwindow* window);
"""
_glfw.glfwRestoreWindow(window)
_glfw.glfwShowWindow.restype = None
_glfw.glfwShowWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def show_window(window):
"""
Makes the specified window visible.
Wrapper for:
void glfwShowWindow(GLFWwindow* window);
"""
_glfw.glfwShowWindow(window)
_glfw.glfwHideWindow.restype = None
_glfw.glfwHideWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def hide_window(window):
"""
Hides the specified window.
Wrapper for:
void glfwHideWindow(GLFWwindow* window);
"""
_glfw.glfwHideWindow(window)
_glfw.glfwGetWindowMonitor.restype = ctypes.POINTER(_GLFWmonitor)
_glfw.glfwGetWindowMonitor.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_window_monitor(window):
"""
Returns the monitor that the window uses for full screen mode.
Wrapper for:
GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window);
"""
return _glfw.glfwGetWindowMonitor(window)
_glfw.glfwGetWindowAttrib.restype = ctypes.c_int
_glfw.glfwGetWindowAttrib.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int]
def get_window_attrib(window, attrib):
"""
Returns an attribute of the specified window.
Wrapper for:
int glfwGetWindowAttrib(GLFWwindow* window, int attrib);
"""
return _glfw.glfwGetWindowAttrib(window, attrib)
_window_user_data_repository = {}
_glfw.glfwSetWindowUserPointer.restype = None
_glfw.glfwSetWindowUserPointer.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_void_p]
def set_window_user_pointer(window, pointer):
"""
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
"""
data = (False, pointer)
if not isinstance(pointer, ctypes.c_void_p):
data = (True, pointer)
# Create a void pointer for the python object
pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
_window_user_data_repository[window_addr] = data
_glfw.glfwSetWindowUserPointer(window, pointer)
_glfw.glfwGetWindowUserPointer.restype = ctypes.c_void_p
_glfw.glfwGetWindowUserPointer.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_window_user_pointer(window):
"""
Returns the user pointer of the specified window.
Wrapper for:
void* glfwGetWindowUserPointer(GLFWwindow* window);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_user_data_repository:
data = _window_user_data_repository[window_addr]
is_wrapped_py_object = data[0]
if is_wrapped_py_object:
return data[1]
return _glfw.glfwGetWindowUserPointer(window)
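# Sketch: plain python objects round-trip through the user pointer because the
# wrapped object is kept alive in _window_user_data_repository:
# set_window_user_pointer(window, {'state': 1})
# assert get_window_user_pointer(window) == {'state': 1}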
_window_pos_callback_repository = {}
_callback_repositories.append(_window_pos_callback_repository)
_glfw.glfwSetWindowPosCallback.restype = _GLFWwindowposfun
_glfw.glfwSetWindowPosCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowposfun]
def set_window_pos_callback(window, cbfun):
"""
Sets the position callback for the specified window.
Wrapper for:
GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_pos_callback_repository:
previous_callback = _window_pos_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowposfun(cbfun)
_window_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowPosCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
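# Sketch (the handler name is illustrative): position callbacks receive
# (window, xpos, ypos), mirroring _GLFWwindowposfun; the size, close, refresh,
# focus and iconify setters below follow the same pattern:
# def on_move(window, xpos, ypos):
#     print('window moved to', xpos, ypos)
# set_window_pos_callback(window, on_move)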
_window_size_callback_repository = {}
_callback_repositories.append(_window_size_callback_repository)
_glfw.glfwSetWindowSizeCallback.restype = _GLFWwindowsizefun
_glfw.glfwSetWindowSizeCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowsizefun]
def set_window_size_callback(window, cbfun):
"""
Sets the size callback for the specified window.
Wrapper for:
GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_size_callback_repository:
previous_callback = _window_size_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowsizefun(cbfun)
_window_size_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowSizeCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_window_close_callback_repository = {}
_callback_repositories.append(_window_close_callback_repository)
_glfw.glfwSetWindowCloseCallback.restype = _GLFWwindowclosefun
_glfw.glfwSetWindowCloseCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowclosefun]
def set_window_close_callback(window, cbfun):
"""
Sets the close callback for the specified window.
Wrapper for:
GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_close_callback_repository:
previous_callback = _window_close_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowclosefun(cbfun)
_window_close_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowCloseCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_window_refresh_callback_repository = {}
_callback_repositories.append(_window_refresh_callback_repository)
_glfw.glfwSetWindowRefreshCallback.restype = _GLFWwindowrefreshfun
_glfw.glfwSetWindowRefreshCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowrefreshfun]
def set_window_refresh_callback(window, cbfun):
"""
Sets the refresh callback for the specified window.
Wrapper for:
GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_refresh_callback_repository:
previous_callback = _window_refresh_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowrefreshfun(cbfun)
_window_refresh_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowRefreshCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_window_focus_callback_repository = {}
_callback_repositories.append(_window_focus_callback_repository)
_glfw.glfwSetWindowFocusCallback.restype = _GLFWwindowfocusfun
_glfw.glfwSetWindowFocusCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowfocusfun]
def set_window_focus_callback(window, cbfun):
"""
Sets the focus callback for the specified window.
Wrapper for:
GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_focus_callback_repository:
previous_callback = _window_focus_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowfocusfun(cbfun)
_window_focus_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowFocusCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_window_iconify_callback_repository = {}
_callback_repositories.append(_window_iconify_callback_repository)
_glfw.glfwSetWindowIconifyCallback.restype = _GLFWwindowiconifyfun
_glfw.glfwSetWindowIconifyCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWwindowiconifyfun]
def set_window_iconify_callback(window, cbfun):
"""
Sets the iconify callback for the specified window.
Wrapper for:
GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_iconify_callback_repository:
previous_callback = _window_iconify_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowiconifyfun(cbfun)
_window_iconify_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowIconifyCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_framebuffer_size_callback_repository = {}
_callback_repositories.append(_framebuffer_size_callback_repository)
_glfw.glfwSetFramebufferSizeCallback.restype = _GLFWframebuffersizefun
_glfw.glfwSetFramebufferSizeCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWframebuffersizefun]
def set_framebuffer_size_callback(window, cbfun):
"""
Sets the framebuffer resize callback for the specified window.
Wrapper for:
GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _framebuffer_size_callback_repository:
previous_callback = _framebuffer_size_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWframebuffersizefun(cbfun)
_framebuffer_size_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetFramebufferSizeCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_glfw.glfwPollEvents.restype = None
_glfw.glfwPollEvents.argtypes = []
def poll_events():
"""
Processes all pending events.
Wrapper for:
void glfwPollEvents(void);
"""
_glfw.glfwPollEvents()
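# Usage sketch: poll_events() is the non-blocking variant usually called once
# per frame, while wait_events() below blocks until something happens. A
# minimal loop, assuming window_should_close and swap_buffers as defined
# elsewhere in this module:
#
#     while not window_should_close(window):
#         swap_buffers(window)
#         poll_events()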
_glfw.glfwWaitEvents.restype = None
_glfw.glfwWaitEvents.argtypes = []
def wait_events():
"""
Waits until events are pending and processes them.
Wrapper for:
void glfwWaitEvents(void);
"""
_glfw.glfwWaitEvents()
_glfw.glfwGetInputMode.restype = ctypes.c_int
_glfw.glfwGetInputMode.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int]
def get_input_mode(window, mode):
"""
Returns the value of an input option for the specified window.
Wrapper for:
int glfwGetInputMode(GLFWwindow* window, int mode);
"""
return _glfw.glfwGetInputMode(window, mode)
_glfw.glfwSetInputMode.restype = None
_glfw.glfwSetInputMode.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.c_int]
def set_input_mode(window, mode, value):
"""
Sets an input option for the specified window.
@param[in] window The window whose input mode to set.
@param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or
`GLFW_STICKY_MOUSE_BUTTONS`.
@param[in] value The new value of the specified input mode.
Wrapper for:
void glfwSetInputMode(GLFWwindow* window, int mode, int value);
"""
_glfw.glfwSetInputMode(window, mode, value)
_glfw.glfwGetKey.restype = ctypes.c_int
_glfw.glfwGetKey.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int]
def get_key(window, key):
"""
Returns the last reported state of a keyboard key for the specified
window.
Wrapper for:
int glfwGetKey(GLFWwindow* window, int key);
"""
return _glfw.glfwGetKey(window, key)
_glfw.glfwGetMouseButton.restype = ctypes.c_int
_glfw.glfwGetMouseButton.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int]
def get_mouse_button(window, button):
"""
Returns the last reported state of a mouse button for the specified
window.
Wrapper for:
int glfwGetMouseButton(GLFWwindow* window, int button);
"""
return _glfw.glfwGetMouseButton(window, button)
_glfw.glfwGetCursorPos.restype = None
_glfw.glfwGetCursorPos.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double)]
def get_cursor_pos(window):
"""
Retrieves the last reported cursor position, relative to the client
area of the window.
Wrapper for:
void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos);
"""
xpos_value = ctypes.c_double(0.0)
xpos = ctypes.pointer(xpos_value)
ypos_value = ctypes.c_double(0.0)
ypos = ctypes.pointer(ypos_value)
_glfw.glfwGetCursorPos(window, xpos, ypos)
return xpos_value.value, ypos_value.value
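# Usage sketch: the C out-parameters are hidden, so callers simply unpack the
# returned tuple:
#
#     x, y = get_cursor_pos(window)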
_glfw.glfwSetCursorPos.restype = None
_glfw.glfwSetCursorPos.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_double,
ctypes.c_double]
def set_cursor_pos(window, xpos, ypos):
"""
Sets the position of the cursor, relative to the client area of the window.
Wrapper for:
void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos);
"""
_glfw.glfwSetCursorPos(window, xpos, ypos)
_key_callback_repository = {}
_callback_repositories.append(_key_callback_repository)
_glfw.glfwSetKeyCallback.restype = _GLFWkeyfun
_glfw.glfwSetKeyCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWkeyfun]
def set_key_callback(window, cbfun):
"""
Sets the key callback.
Wrapper for:
GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _key_callback_repository:
previous_callback = _key_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWkeyfun(cbfun)
_key_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetKeyCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_char_callback_repository = {}
_callback_repositories.append(_char_callback_repository)
_glfw.glfwSetCharCallback.restype = _GLFWcharfun
_glfw.glfwSetCharCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWcharfun]
def set_char_callback(window, cbfun):
"""
Sets the Unicode character callback.
Wrapper for:
GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _char_callback_repository:
previous_callback = _char_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWcharfun(cbfun)
_char_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetCharCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_mouse_button_callback_repository = {}
_callback_repositories.append(_mouse_button_callback_repository)
_glfw.glfwSetMouseButtonCallback.restype = _GLFWmousebuttonfun
_glfw.glfwSetMouseButtonCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWmousebuttonfun]
def set_mouse_button_callback(window, cbfun):
"""
Sets the mouse button callback.
Wrapper for:
GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _mouse_button_callback_repository:
previous_callback = _mouse_button_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWmousebuttonfun(cbfun)
_mouse_button_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetMouseButtonCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_cursor_pos_callback_repository = {}
_callback_repositories.append(_cursor_pos_callback_repository)
_glfw.glfwSetCursorPosCallback.restype = _GLFWcursorposfun
_glfw.glfwSetCursorPosCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWcursorposfun]
def set_cursor_pos_callback(window, cbfun):
"""
Sets the cursor position callback.
Wrapper for:
GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _cursor_pos_callback_repository:
previous_callback = _cursor_pos_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWcursorposfun(cbfun)
_cursor_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetCursorPosCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_cursor_enter_callback_repository = {}
_callback_repositories.append(_cursor_enter_callback_repository)
_glfw.glfwSetCursorEnterCallback.restype = _GLFWcursorenterfun
_glfw.glfwSetCursorEnterCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWcursorenterfun]
def set_cursor_enter_callback(window, cbfun):
"""
Sets the cursor enter/exit callback.
Wrapper for:
GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _cursor_enter_callback_repository:
previous_callback = _cursor_enter_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWcursorenterfun(cbfun)
_cursor_enter_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetCursorEnterCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_scroll_callback_repository = {}
_callback_repositories.append(_scroll_callback_repository)
_glfw.glfwSetScrollCallback.restype = _GLFWscrollfun
_glfw.glfwSetScrollCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWscrollfun]
def set_scroll_callback(window, cbfun):
"""
Sets the scroll callback.
Wrapper for:
GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _scroll_callback_repository:
previous_callback = _scroll_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWscrollfun(cbfun)
_scroll_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetScrollCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
_glfw.glfwJoystickPresent.restype = ctypes.c_int
_glfw.glfwJoystickPresent.argtypes = [ctypes.c_int]
def joystick_present(joy):
"""
Returns whether the specified joystick is present.
Wrapper for:
int glfwJoystickPresent(int joy);
"""
return _glfw.glfwJoystickPresent(joy)
_glfw.glfwGetJoystickAxes.restype = ctypes.POINTER(ctypes.c_float)
_glfw.glfwGetJoystickAxes.argtypes = [ctypes.c_int,
ctypes.POINTER(ctypes.c_int)]
def get_joystick_axes(joy):
"""
Returns the values of all axes of the specified joystick.
Wrapper for:
const float* glfwGetJoystickAxes(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickAxes(joy, count)
return result, count_value.value
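# Usage sketch: the raw ctypes pointer plus count can be turned into a Python
# list (joystick id 0 corresponds to GLFW_JOYSTICK_1):
#
#     axes_ptr, count = get_joystick_axes(0)
#     axes = [axes_ptr[i] for i in range(count)] if count else []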
_glfw.glfwGetJoystickButtons.restype = ctypes.POINTER(ctypes.c_ubyte)
_glfw.glfwGetJoystickButtons.argtypes = [ctypes.c_int,
ctypes.POINTER(ctypes.c_int)]
def get_joystick_buttons(joy):
"""
Returns the state of all buttons of the specified joystick.
Wrapper for:
const unsigned char* glfwGetJoystickButtons(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickButtons(joy, count)
return result, count_value.value
_glfw.glfwGetJoystickName.restype = ctypes.c_char_p
_glfw.glfwGetJoystickName.argtypes = [ctypes.c_int]
def get_joystick_name(joy):
"""
Returns the name of the specified joystick.
Wrapper for:
const char* glfwGetJoystickName(int joy);
"""
return _glfw.glfwGetJoystickName(joy)
_glfw.glfwSetClipboardString.restype = None
_glfw.glfwSetClipboardString.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_char_p]
def set_clipboard_string(window, string):
"""
Sets the clipboard to the specified string.
Wrapper for:
void glfwSetClipboardString(GLFWwindow* window, const char* string);
"""
_glfw.glfwSetClipboardString(window, _to_char_p(string))
_glfw.glfwGetClipboardString.restype = ctypes.c_char_p
_glfw.glfwGetClipboardString.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_clipboard_string(window):
"""
Retrieves the contents of the clipboard as a string.
Wrapper for:
const char* glfwGetClipboardString(GLFWwindow* window);
"""
return _glfw.glfwGetClipboardString(window)
_glfw.glfwGetTime.restype = ctypes.c_double
_glfw.glfwGetTime.argtypes = []
def get_time():
"""
Returns the value of the GLFW timer.
Wrapper for:
double glfwGetTime(void);
"""
return _glfw.glfwGetTime()
_glfw.glfwSetTime.restype = None
_glfw.glfwSetTime.argtypes = [ctypes.c_double]
def set_time(time):
"""
Sets the GLFW timer.
Wrapper for:
void glfwSetTime(double time);
"""
_glfw.glfwSetTime(time)
_glfw.glfwMakeContextCurrent.restype = None
_glfw.glfwMakeContextCurrent.argtypes = [ctypes.POINTER(_GLFWwindow)]
def make_context_current(window):
"""
Makes the context of the specified window current for the calling
thread.
Wrapper for:
void glfwMakeContextCurrent(GLFWwindow* window);
"""
_glfw.glfwMakeContextCurrent(window)
_glfw.glfwGetCurrentContext.restype = ctypes.POINTER(_GLFWwindow)
_glfw.glfwGetCurrentContext.argtypes = []
def get_current_context():
"""
Returns the window whose context is current on the calling thread.
Wrapper for:
GLFWwindow* glfwGetCurrentContext(void);
"""
return _glfw.glfwGetCurrentContext()
_glfw.glfwSwapBuffers.restype = None
_glfw.glfwSwapBuffers.argtypes = [ctypes.POINTER(_GLFWwindow)]
def swap_buffers(window):
"""
Swaps the front and back buffers of the specified window.
Wrapper for:
void glfwSwapBuffers(GLFWwindow* window);
"""
_glfw.glfwSwapBuffers(window)
_glfw.glfwSwapInterval.restype = None
_glfw.glfwSwapInterval.argtypes = [ctypes.c_int]
def swap_interval(interval):
"""
Sets the swap interval for the current context.
Wrapper for:
void glfwSwapInterval(int interval);
"""
_glfw.glfwSwapInterval(interval)
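# Usage sketch: swap_interval(1) waits for one vertical blank per buffer swap
# ("vsync"), while swap_interval(0) swaps as fast as possible. A context must
# be current on the calling thread (see make_context_current above).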
_glfw.glfwExtensionSupported.restype = ctypes.c_int
_glfw.glfwExtensionSupported.argtypes = [ctypes.c_char_p]
def extension_supported(extension):
"""
Returns whether the specified extension is available.
Wrapper for:
int glfwExtensionSupported(const char* extension);
"""
return _glfw.glfwExtensionSupported(_to_char_p(extension))
_glfw.glfwGetProcAddress.restype = ctypes.c_void_p
_glfw.glfwGetProcAddress.argtypes = [ctypes.c_char_p]
def get_proc_address(procname):
"""
Returns the address of the specified function for the current
context.
Wrapper for:
GLFWglproc glfwGetProcAddress(const char* procname);
"""
return _glfw.glfwGetProcAddress(_to_char_p(procname))
if hasattr(_glfw, 'glfwSetDropCallback'):
_window_drop_callback_repository = {}
_callback_repositories.append(_window_drop_callback_repository)
_glfw.glfwSetDropCallback.restype = _GLFWdropfun
_glfw.glfwSetDropCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWdropfun]
def set_drop_callback(window, cbfun):
"""
Sets the file drop callback.
Wrapper for:
GLFWdropfun glfwSetDropCallback(GLFWwindow* window, GLFWdropfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_drop_callback_repository:
previous_callback = _window_drop_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
else:
def cb_wrapper(window, count, c_paths, cbfun=cbfun):
paths = [c_paths[i].decode('utf-8') for i in range(count)]
cbfun(window, paths)
cbfun = cb_wrapper
c_cbfun = _GLFWdropfun(cbfun)
_window_drop_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetDropCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
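    # Usage sketch: the wrapper decodes the C string array, so the Python
    # callback receives a plain list of str paths:
    #
    #     def on_drop(window, paths):
    #         print('dropped:', paths)
    #
    #     set_drop_callback(window, on_drop)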
if hasattr(_glfw, 'glfwSetCharModsCallback'):
_window_char_mods_callback_repository = {}
_callback_repositories.append(_window_char_mods_callback_repository)
_glfw.glfwSetCharModsCallback.restype = _GLFWcharmodsfun
_glfw.glfwSetCharModsCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
_GLFWcharmodsfun]
def set_char_mods_callback(window, cbfun):
"""
Sets the Unicode character with modifiers callback.
Wrapper for:
GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* window, GLFWcharmodsfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_char_mods_callback_repository:
previous_callback = _window_char_mods_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWcharmodsfun(cbfun)
_window_char_mods_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetCharModsCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
if hasattr(_glfw, 'glfwVulkanSupported'):
_glfw.glfwVulkanSupported.restype = ctypes.c_int
_glfw.glfwVulkanSupported.argtypes = []
def vulkan_supported():
"""
Returns whether the Vulkan loader has been found.
Wrapper for:
int glfwVulkanSupported(void);
"""
return _glfw.glfwVulkanSupported() != 0
if hasattr(_glfw, 'glfwGetRequiredInstanceExtensions'):
_glfw.glfwGetRequiredInstanceExtensions.restype = ctypes.POINTER(ctypes.c_char_p)
_glfw.glfwGetRequiredInstanceExtensions.argtypes = [ctypes.POINTER(ctypes.c_uint32)]
def get_required_instance_extensions():
"""
Returns the Vulkan instance extensions required by GLFW.
Wrapper for:
const char** glfwGetRequiredInstanceExtensions(uint32_t* count);
"""
count_value = ctypes.c_uint32(0)
count = ctypes.pointer(count_value)
c_extensions = _glfw.glfwGetRequiredInstanceExtensions(count)
count = count_value.value
extensions = [c_extensions[i].decode('utf-8') for i in range(count)]
return extensions
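    # Usage sketch (only meaningful on a GLFW build with Vulkan support, in
    # which case vulkan_supported above is also defined):
    #
    #     if vulkan_supported():
    #         extensions = get_required_instance_extensions()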
if hasattr(_glfw, 'glfwGetTimerValue'):
_glfw.glfwGetTimerValue.restype = ctypes.c_uint64
_glfw.glfwGetTimerValue.argtypes = []
def get_timer_value():
"""
Returns the current value of the raw timer.
Wrapper for:
uint64_t glfwGetTimerValue(void);
"""
return int(_glfw.glfwGetTimerValue())
if hasattr(_glfw, 'glfwGetTimerFrequency'):
_glfw.glfwGetTimerFrequency.restype = ctypes.c_uint64
_glfw.glfwGetTimerFrequency.argtypes = []
def get_timer_frequency():
"""
Returns the frequency, in Hz, of the raw timer.
Wrapper for:
uint64_t glfwGetTimerFrequency(void);
"""
return int(_glfw.glfwGetTimerFrequency())
if hasattr(_glfw, 'glfwSetJoystickCallback'):
_joystick_callback = None
_glfw.glfwSetJoystickCallback.restype = _GLFWjoystickfun
_glfw.glfwSetJoystickCallback.argtypes = [_GLFWjoystickfun]
def set_joystick_callback(cbfun):
"""
        Sets the joystick configuration callback.
Wrapper for:
GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun cbfun);
"""
global _joystick_callback
        previous_callback = _joystick_callback
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWjoystickfun(cbfun)
_joystick_callback = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetJoystickCallback(cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
if hasattr(_glfw, 'glfwGetKeyName'):
_glfw.glfwGetKeyName.restype = ctypes.c_char_p
_glfw.glfwGetKeyName.argtypes = [ctypes.c_int, ctypes.c_int]
def get_key_name(key, scancode):
"""
Returns the localized name of the specified printable key.
Wrapper for:
const char* glfwGetKeyName(int key, int scancode);
"""
key_name = _glfw.glfwGetKeyName(key, scancode)
if key_name:
return key_name.decode('utf-8')
return None
if hasattr(_glfw, 'glfwCreateCursor'):
_glfw.glfwCreateCursor.restype = ctypes.POINTER(_GLFWcursor)
_glfw.glfwCreateCursor.argtypes = [ctypes.POINTER(_GLFWimage),
ctypes.c_int,
ctypes.c_int]
def create_cursor(image, xhot, yhot):
"""
Creates a custom cursor.
Wrapper for:
GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot);
"""
c_image = _GLFWimage()
c_image.wrap(image)
return _glfw.glfwCreateCursor(ctypes.pointer(c_image), xhot, yhot)
if hasattr(_glfw, 'glfwCreateStandardCursor'):
_glfw.glfwCreateStandardCursor.restype = ctypes.POINTER(_GLFWcursor)
_glfw.glfwCreateStandardCursor.argtypes = [ctypes.c_int]
def create_standard_cursor(shape):
"""
Creates a cursor with a standard shape.
Wrapper for:
GLFWcursor* glfwCreateStandardCursor(int shape);
"""
return _glfw.glfwCreateStandardCursor(shape)
if hasattr(_glfw, 'glfwDestroyCursor'):
_glfw.glfwDestroyCursor.restype = None
_glfw.glfwDestroyCursor.argtypes = [ctypes.POINTER(_GLFWcursor)]
def destroy_cursor(cursor):
"""
Destroys a cursor.
Wrapper for:
void glfwDestroyCursor(GLFWcursor* cursor);
"""
_glfw.glfwDestroyCursor(cursor)
if hasattr(_glfw, 'glfwSetCursor'):
_glfw.glfwSetCursor.restype = None
_glfw.glfwSetCursor.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(_GLFWcursor)]
def set_cursor(window, cursor):
"""
Sets the cursor for the window.
Wrapper for:
void glfwSetCursor(GLFWwindow* window, GLFWcursor* cursor);
"""
_glfw.glfwSetCursor(window, cursor)
if hasattr(_glfw, 'glfwCreateWindowSurface'):
_glfw.glfwCreateWindowSurface.restype = ctypes.c_int
_glfw.glfwCreateWindowSurface.argtypes = [ctypes.c_void_p,
ctypes.POINTER(_GLFWwindow),
ctypes.c_void_p,
ctypes.c_void_p]
def create_window_surface(instance, window, allocator, surface):
"""
Creates a Vulkan surface for the specified window.
Wrapper for:
VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface);
"""
return _glfw.glfwCreateWindowSurface(instance, window, allocator, surface)
if hasattr(_glfw, 'glfwGetPhysicalDevicePresentationSupport'):
_glfw.glfwGetPhysicalDevicePresentationSupport.restype = ctypes.c_int
_glfw.glfwGetPhysicalDevicePresentationSupport.argtypes = [ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_uint32]
def get_physical_device_presentation_support(instance, device, queuefamily):
"""
        Returns whether the specified queue family can present images.
Wrapper for:
int glfwGetPhysicalDevicePresentationSupport(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily);
"""
return _glfw.glfwGetPhysicalDevicePresentationSupport(instance, device, queuefamily)
if hasattr(_glfw, 'glfwGetInstanceProcAddress'):
_glfw.glfwGetInstanceProcAddress.restype = ctypes.c_void_p
_glfw.glfwGetInstanceProcAddress.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
def get_instance_proc_address(instance, procname):
"""
Returns the address of the specified Vulkan instance function.
Wrapper for:
GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance, const char* procname);
"""
return _glfw.glfwGetInstanceProcAddress(instance, procname)
if hasattr(_glfw, 'glfwSetWindowIcon'):
_glfw.glfwSetWindowIcon.restype = None
_glfw.glfwSetWindowIcon.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int,
ctypes.POINTER(_GLFWimage)]
def set_window_icon(window, count, image):
"""
Sets the icon for the specified window.
Wrapper for:
void glfwSetWindowIcon(GLFWwindow* window, int count, const GLFWimage* images);
"""
_image = _GLFWimage()
_image.wrap(image)
_glfw.glfwSetWindowIcon(window, count, ctypes.pointer(_image))
if hasattr(_glfw, 'glfwSetWindowSizeLimits'):
_glfw.glfwSetWindowSizeLimits.restype = None
_glfw.glfwSetWindowSizeLimits.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
def set_window_size_limits(window,
minwidth, minheight,
maxwidth, maxheight):
"""
Sets the size limits of the specified window.
Wrapper for:
void glfwSetWindowSizeLimits(GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
"""
_glfw.glfwSetWindowSizeLimits(window,
minwidth, minheight,
maxwidth, maxheight)
if hasattr(_glfw, 'glfwSetWindowAspectRatio'):
_glfw.glfwSetWindowAspectRatio.restype = None
_glfw.glfwSetWindowAspectRatio.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.c_int, ctypes.c_int]
def set_window_aspect_ratio(window, numer, denom):
"""
Sets the aspect ratio of the specified window.
Wrapper for:
void glfwSetWindowAspectRatio(GLFWwindow* window, int numer, int denom);
"""
_glfw.glfwSetWindowAspectRatio(window, numer, denom)
if hasattr(_glfw, 'glfwGetWindowFrameSize'):
_glfw.glfwGetWindowFrameSize.restype = None
_glfw.glfwGetWindowFrameSize.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
    def get_window_frame_size(window):
"""
Retrieves the size of the frame of the window.
Wrapper for:
void glfwGetWindowFrameSize(GLFWwindow* window, int* left, int* top, int* right, int* bottom);
"""
left = ctypes.c_int(0)
top = ctypes.c_int(0)
right = ctypes.c_int(0)
bottom = ctypes.c_int(0)
_glfw.glfwGetWindowFrameSize(window,
ctypes.pointer(left),
ctypes.pointer(top),
ctypes.pointer(right),
ctypes.pointer(bottom))
return left.value, top.value, right.value, bottom.value
if hasattr(_glfw, 'glfwMaximizeWindow'):
_glfw.glfwMaximizeWindow.restype = None
_glfw.glfwMaximizeWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def maximize_window(window):
"""
Maximizes the specified window.
Wrapper for:
void glfwMaximizeWindow(GLFWwindow* window);
"""
_glfw.glfwMaximizeWindow(window)
if hasattr(_glfw, 'glfwFocusWindow'):
_glfw.glfwFocusWindow.restype = None
_glfw.glfwFocusWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def focus_window(window):
"""
Brings the specified window to front and sets input focus.
Wrapper for:
void glfwFocusWindow(GLFWwindow* window);
"""
_glfw.glfwFocusWindow(window)
if hasattr(_glfw, 'glfwSetWindowMonitor'):
_glfw.glfwSetWindowMonitor.restype = None
_glfw.glfwSetWindowMonitor.argtypes = [ctypes.POINTER(_GLFWwindow),
ctypes.POINTER(_GLFWmonitor),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
def set_window_monitor(window, monitor, xpos, ypos, width, height,
refresh_rate):
"""
Sets the mode, monitor, video mode and placement of a window.
Wrapper for:
void glfwSetWindowMonitor(GLFWwindow* window, GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
"""
_glfw.glfwSetWindowMonitor(window, monitor,
xpos, ypos, width, height, refresh_rate)
if hasattr(_glfw, 'glfwWaitEventsTimeout'):
_glfw.glfwWaitEventsTimeout.restype = None
_glfw.glfwWaitEventsTimeout.argtypes = [ctypes.c_double]
def wait_events_timeout(timeout):
"""
Waits with timeout until events are queued and processes them.
Wrapper for:
void glfwWaitEventsTimeout(double timeout);
"""
_glfw.glfwWaitEventsTimeout(timeout)
if hasattr(_glfw, 'glfwPostEmptyEvent'):
_glfw.glfwPostEmptyEvent.restype = None
_glfw.glfwPostEmptyEvent.argtypes = []
def post_empty_event():
"""
Posts an empty event to the event queue.
Wrapper for:
void glfwPostEmptyEvent();
"""
_glfw.glfwPostEmptyEvent()
_prepare_errcheck()
| ronhandler/gitroot | pyglfw/glfw.py | Python | unlicense | 77,583 |
a = 'a|b|c|d|e'
b = a.split('|', 3)
print(b)
"""<
['a', 'b', 'c', 'd|e']
>"""
| pythonpatterns/patterns | p0081.py | Python | unlicense | 81 |
"""Values export object module."""
class Values(object):
"""Class for Values export object."""
name = None
document = None
def save(self, section, document, graphics=True):
"""Get intro."""
self.document = document
section = self.name
return document
def save_with_graphics(self, name, section, document):
"""Get intro."""
self.name = name
self.document = document
section = self.name
return document
| executive-consultants-of-los-angeles/rsum | rsum/export/sections/values.py | Python | unlicense | 498 |
import StringIO
from novaclient.v1_1 import servers
from tests import utils
from tests.v1_1 import fakes
cs = fakes.FakeClient()
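# The fake client records every HTTP request it receives; assert_called()
# checks the most recent one, so each test below exercises a client call and
# then verifies the request it produced.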
class ServersTest(utils.TestCase):
def test_list_servers(self):
sl = cs.servers.list()
cs.assert_called('GET', '/servers/detail')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_list_servers_undetailed(self):
sl = cs.servers.list(detailed=False)
cs.assert_called('GET', '/servers')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_get_server_details(self):
s = cs.servers.get(1234)
cs.assert_called('GET', '/servers/1234')
self.assertTrue(isinstance(s, servers.Server))
self.assertEqual(s.id, 1234)
self.assertEqual(s.status, 'BUILD')
def test_create_server(self):
s = cs.servers.create(
name="My server",
image=1,
flavor=1,
meta={'foo': 'bar'},
userdata="hello moto",
key_name="fakekey",
files={
'/etc/passwd': 'some data', # a file
'/tmp/foo.txt': StringIO.StringIO('data'), # a stream
}
)
cs.assert_called('POST', '/servers')
self.assertTrue(isinstance(s, servers.Server))
def test_create_server_userdata_file_object(self):
s = cs.servers.create(
name="My server",
image=1,
flavor=1,
meta={'foo': 'bar'},
userdata=StringIO.StringIO('hello moto'),
files={
'/etc/passwd': 'some data', # a file
'/tmp/foo.txt': StringIO.StringIO('data'), # a stream
},
)
cs.assert_called('POST', '/servers')
self.assertTrue(isinstance(s, servers.Server))
def test_update_server(self):
s = cs.servers.get(1234)
# Update via instance
s.update(name='hi')
cs.assert_called('PUT', '/servers/1234')
s.update(name='hi')
cs.assert_called('PUT', '/servers/1234')
# Silly, but not an error
s.update()
# Update via manager
cs.servers.update(s, name='hi')
cs.assert_called('PUT', '/servers/1234')
def test_delete_server(self):
s = cs.servers.get(1234)
s.delete()
cs.assert_called('DELETE', '/servers/1234')
cs.servers.delete(1234)
cs.assert_called('DELETE', '/servers/1234')
cs.servers.delete(s)
cs.assert_called('DELETE', '/servers/1234')
def test_delete_server_meta(self):
s = cs.servers.delete_meta(1234, ['test_key'])
cs.assert_called('DELETE', '/servers/1234/metadata/test_key')
def test_set_server_meta(self):
s = cs.servers.set_meta(1234, {'test_key': 'test_value'})
reval = cs.assert_called('POST', '/servers/1234/metadata',
{'metadata': {'test_key': 'test_value'}})
def test_find(self):
s = cs.servers.find(name='sample-server')
cs.assert_called('GET', '/servers/detail')
self.assertEqual(s.name, 'sample-server')
        # Find with multiple results arbitrarily returns the first item
s = cs.servers.find(flavor={"id": 1, "name": "256 MB Server"})
sl = cs.servers.findall(flavor={"id": 1, "name": "256 MB Server"})
self.assertEqual(sl[0], s)
self.assertEqual([s.id for s in sl], [1234, 5678])
def test_reboot_server(self):
s = cs.servers.get(1234)
s.reboot()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.reboot(s, type='HARD')
cs.assert_called('POST', '/servers/1234/action')
def test_rebuild_server(self):
s = cs.servers.get(1234)
s.rebuild(image=1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rebuild(s, image=1)
cs.assert_called('POST', '/servers/1234/action')
s.rebuild(image=1, password='5678')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rebuild(s, image=1, password='5678')
cs.assert_called('POST', '/servers/1234/action')
def test_resize_server(self):
s = cs.servers.get(1234)
s.resize(flavor=1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.resize(s, flavor=1)
cs.assert_called('POST', '/servers/1234/action')
def test_confirm_resized_server(self):
s = cs.servers.get(1234)
s.confirm_resize()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.confirm_resize(s)
cs.assert_called('POST', '/servers/1234/action')
def test_revert_resized_server(self):
s = cs.servers.get(1234)
s.revert_resize()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.revert_resize(s)
cs.assert_called('POST', '/servers/1234/action')
def test_migrate_server(self):
s = cs.servers.get(1234)
s.migrate()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.migrate(s)
cs.assert_called('POST', '/servers/1234/action')
def test_add_fixed_ip(self):
s = cs.servers.get(1234)
s.add_fixed_ip(1)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.add_fixed_ip(s, 1)
cs.assert_called('POST', '/servers/1234/action')
def test_remove_fixed_ip(self):
s = cs.servers.get(1234)
s.remove_fixed_ip('10.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.remove_fixed_ip(s, '10.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
def test_add_floating_ip(self):
s = cs.servers.get(1234)
s.add_floating_ip('11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.add_floating_ip(s, '11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
f = cs.floating_ips.list()[0]
cs.servers.add_floating_ip(s, f)
cs.assert_called('POST', '/servers/1234/action')
s.add_floating_ip(f)
cs.assert_called('POST', '/servers/1234/action')
def test_remove_floating_ip(self):
s = cs.servers.get(1234)
s.remove_floating_ip('11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.remove_floating_ip(s, '11.0.0.1')
cs.assert_called('POST', '/servers/1234/action')
f = cs.floating_ips.list()[0]
cs.servers.remove_floating_ip(s, f)
cs.assert_called('POST', '/servers/1234/action')
s.remove_floating_ip(f)
cs.assert_called('POST', '/servers/1234/action')
def test_rescue(self):
s = cs.servers.get(1234)
s.rescue()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.rescue(s)
cs.assert_called('POST', '/servers/1234/action')
def test_unrescue(self):
s = cs.servers.get(1234)
s.unrescue()
cs.assert_called('POST', '/servers/1234/action')
cs.servers.unrescue(s)
cs.assert_called('POST', '/servers/1234/action')
def test_get_console_output_without_length(self):
success = 'foo'
s = cs.servers.get(1234)
s.get_console_output()
self.assertEqual(s.get_console_output(), success)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_console_output(s)
self.assertEqual(cs.servers.get_console_output(s), success)
cs.assert_called('POST', '/servers/1234/action')
def test_get_console_output_with_length(self):
success = 'foo'
s = cs.servers.get(1234)
s.get_console_output(length=50)
self.assertEqual(s.get_console_output(length=50), success)
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_console_output(s, length=50)
self.assertEqual(cs.servers.get_console_output(s, length=50), success)
cs.assert_called('POST', '/servers/1234/action')
def test_get_server_actions(self):
s = cs.servers.get(1234)
actions = s.actions()
self.assertTrue(actions is not None)
cs.assert_called('GET', '/servers/1234/actions')
actions_from_manager = cs.servers.actions(1234)
self.assertTrue(actions_from_manager is not None)
cs.assert_called('GET', '/servers/1234/actions')
self.assertEqual(actions, actions_from_manager)
def test_get_server_diagnostics(self):
s = cs.servers.get(1234)
diagnostics = s.diagnostics()
self.assertTrue(diagnostics is not None)
cs.assert_called('GET', '/servers/1234/diagnostics')
diagnostics_from_manager = cs.servers.diagnostics(1234)
self.assertTrue(diagnostics_from_manager is not None)
cs.assert_called('GET', '/servers/1234/diagnostics')
self.assertEqual(diagnostics, diagnostics_from_manager)
def test_get_vnc_console(self):
s = cs.servers.get(1234)
s.get_vnc_console('fake')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.get_vnc_console(s, 'fake')
cs.assert_called('POST', '/servers/1234/action')
def test_create_image(self):
s = cs.servers.get(1234)
s.create_image('123')
cs.assert_called('POST', '/servers/1234/action')
s.create_image('123', {})
cs.assert_called('POST', '/servers/1234/action')
cs.servers.create_image(s, '123')
cs.assert_called('POST', '/servers/1234/action')
cs.servers.create_image(s, '123', {})
| rcbops/python-novaclient-buildpackage | tests/v1_1/test_servers.py | Python | apache-2.0 | 9,616 |
# -*- coding: utf-8 -*-
from dateutil.parser import parse
import json
import os
import yaml
import db_old
# TODO: This function is currently only used by the db.py module,
# imported from serving.py via sys.path modification. Due to the new
# sys.path content, db.py attempts to find the `yaml_load` function in
# this file, instead of in its own "utils.py". When imports are done
# properly, delete this function from here.
def yaml_load(path):
with open(path, 'r', encoding='utf-8') as f:
data_yaml = yaml.load(f, Loader=yaml.FullLoader)
return data_yaml
def NormalizeIco(ico):
if ico is None:
return None
ico = ico.replace(" ", "")
try:
a = int(ico)
return a
except:
return None
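# Usage sketch: spaces are stripped and the result parsed as an int, e.g.
# NormalizeIco("12 345 678") -> 12345678, NormalizeIco("abc") -> None.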
def IcoToLatLngMap():
output_map = {}
for table in ["orsresd_data", "firmy_data", "new_orsr_data"]:
with db_old.getCursor() as cur:
sql = "SELECT ico, lat, lng FROM " + table + \
" JOIN entities on entities.id = " + table + ".id" + \
" WHERE ico IS NOT NULL"
db_old.execute(cur, sql)
for row in cur:
output_map[int(row["ico"])] = (row["lat"], row["lng"])
return output_map
# Returns the value of 'entities.column' for the entity with 'table'.ico='ico'
def getColumnForTableIco(table, column, ico):
sql = "SELECT " + column + " FROM " + table + \
" JOIN entities ON entities.id = " + table + ".id" + \
" WHERE ico = %s" + \
" LIMIT 1"
with db_old.getCursor() as cur:
try:
cur = db_old.execute(cur, sql, [ico])
row = cur.fetchone()
if row is None:
return None
return row[column]
except:
return None
# TODO: refactor as this is also done in server
def getEidForIco(ico):
ico = NormalizeIco(ico)
for table in ["new_orsr_data", "firmy_data", "orsresd_data"]:
value = getColumnForTableIco(table, "eid", ico)
if value is not None:
return value
return None
def getAddressForIco(ico):
ico = NormalizeIco(ico)
for table in ["new_orsr_data", "firmy_data", "orsresd_data"]:
value = getColumnForTableIco(table, "address", ico)
if value is not None:
return value.decode("utf8")
return ""
# Returns estimated/final value
def getValue(obstaravanie):
if obstaravanie.final_price is not None:
return obstaravanie.final_price
if obstaravanie.draft_price is not None:
return obstaravanie.draft_price
return None
def obstaravanieToJson(obstaravanie, candidates, full_candidates=1, compute_range=False):
current = {}
current["id"] = obstaravanie.id
if obstaravanie.description is None:
current["text"] = "N/A"
else:
current["text"] = obstaravanie.description
if obstaravanie.title is not None:
current["title"] = obstaravanie.title
if obstaravanie.bulletin_year is not None:
current["bulletin_year"] = obstaravanie.bulletin_year
if obstaravanie.bulleting_number is not None:
current["bulletin_number"] = obstaravanie.bulleting_number
current["price"] = getValue(obstaravanie)
predictions = obstaravanie.predictions
if (predictions is not None) and (len(predictions) > 0):
prediction = predictions[0]
current["price_avg"] = prediction.mean
current["price_stdev"] = prediction.stdev
current["price_num"] = prediction.num
if obstaravanie.json is not None:
j = json.loads(obstaravanie.json)
if ("bulletin_issue" in j) and ("published_on" in j["bulletin_issue"]):
bdate = parse(j["bulletin_issue"]["published_on"])
current["bulletin_day"] = bdate.day
current["bulletin_month"] = bdate.month
current["bulletin_date"] = "%d. %s %d" % (bdate.day,
["január", "február", "marec", "apríl", "máj", "jún",
"júl", "august", "september", "október", "november",
"december"][bdate.month - 1], bdate.year)
current["customer"] = obstaravanie.customer.name
if candidates > 0:
# Generate at most one candidate in full, others empty, so we know the count
current["kandidati"] = [{
"id": c.reason.id,
"eid": getEidForIco(c.company.ico),
"name": c.company.name,
"ico": c.company.ico,
"text": c.reason.description,
"title": c.reason.title,
"customer": c.reason.customer.name,
"price": getValue(c.reason),
"score": c.score} for c in obstaravanie.candidates[:full_candidates]]
for _ in obstaravanie.candidates[full_candidates:candidates]:
current["kandidati"].append({})
return current
def getAddressJson(eid):
# json with all geocoded data
j = {}
with db_old.getCursor() as cur:
cur = db_old.execute(cur, "SELECT json FROM entities WHERE eid=%s", [eid])
row = cur.fetchone()
if row is None:
return None
j = json.loads(row["json"])
# TODO: do not duplicate this with code in verejne/
def getComponent(json, typeName):
try:
for component in json[0]["address_components"]:
if typeName in component["types"]:
return component["long_name"]
return ""
except:
return ""
# types description: https://developers.google.com/maps/documentation/geocoding/intro#Types
# street / city can be defined in multiple ways
address = {
"street": (
getComponent(j, "street_address") +
getComponent(j, "route") +
getComponent(j, "intersection") +
" " + getComponent(j, "street_number")
),
"city": getComponent(j, "locality"),
"zip": getComponent(j, "postal_code"),
"country": getComponent(j, "country"),
}
return address
# Generates a report for the given notifications by writing a .json file
# that is later picked up, rendered to .pdf and sent.
def generateReport(notifications):
# Bail out if no notifications
if len(notifications) == 0:
return False
company = notifications[0].candidate.company
eid = getEidForIco(company.ico)
if eid is None:
return False
data = {}
data["company"] = {
"name": company.name,
"ico": company.ico,
"address_full": getAddressForIco(company.ico),
}
data["company"].update(getAddressJson(eid))
notifications_json = []
for notification in notifications:
notifications_json.append({
"reason": obstaravanieToJson(
notification.candidate.reason, candidates=0, full_candidates=0),
"what": obstaravanieToJson(
notification.candidate.obstaravanie, candidates=0, full_candidates=0),
})
data["notifications"] = notifications_json
# Generate .json file atomically into the following directory. It is picked up
    # from there and automatically turned into .pdf and then sent.
shared_path = "/data/notifikacie/in/"
tmp_filename = shared_path + ("json_%d_%d.tmp" % (eid, os.getpid()))
final_filename = shared_path + ("data_%d_%d.json" % (eid, os.getpid()))
with open(tmp_filename, "w") as tmp_file:
json.dump(data, tmp_file, sort_keys=True, indent=4, separators=(',', ': '))
os.rename(tmp_filename, final_filename)
return True
| verejnedigital/verejne.digital | obstaravania/utils.py | Python | apache-2.0 | 7,620 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 11 23:06:06 2016
@author: DIP
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def build_feature_matrix(documents, feature_type='frequency',
ngram_range=(1, 1), min_df=0.0, max_df=1.0):
feature_type = feature_type.lower().strip()
if feature_type == 'binary':
vectorizer = CountVectorizer(binary=True, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'frequency':
vectorizer = CountVectorizer(binary=False, min_df=min_df,
max_df=max_df, ngram_range=ngram_range)
elif feature_type == 'tfidf':
vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df,
ngram_range=ngram_range)
else:
raise Exception("Wrong feature type entered. Possible values: 'binary', 'frequency', 'tfidf'")
feature_matrix = vectorizer.fit_transform(documents).astype(float)
return vectorizer, feature_matrix
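# Usage sketch:
#
#     corpus = ['the brown fox', 'the lazy dog']
#     vectorizer, matrix = build_feature_matrix(corpus, feature_type='tfidf')
#     # `matrix` is a scipy sparse matrix, one row per document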
from sklearn import metrics
import numpy as np
import pandas as pd
def display_evaluation_metrics(true_labels, predicted_labels, positive_class=1):
print 'Accuracy:', np.round(
metrics.accuracy_score(true_labels,
predicted_labels),
2)
print 'Precision:', np.round(
metrics.precision_score(true_labels,
predicted_labels,
pos_label=positive_class,
average='binary'),
2)
print 'Recall:', np.round(
metrics.recall_score(true_labels,
predicted_labels,
pos_label=positive_class,
average='binary'),
2)
print 'F1 Score:', np.round(
metrics.f1_score(true_labels,
predicted_labels,
pos_label=positive_class,
average='binary'),
2)
def display_confusion_matrix(true_labels, predicted_labels, classes=[1,0]):
cm = metrics.confusion_matrix(y_true=true_labels,
y_pred=predicted_labels,
labels=classes)
cm_frame = pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'], classes],
labels=[[0,0],[0,1]]),
index=pd.MultiIndex(levels=[['Actual:'], classes],
labels=[[0,0],[0,1]]))
print cm_frame
def display_classification_report(true_labels, predicted_labels, classes=[1,0]):
report = metrics.classification_report(y_true=true_labels,
y_pred=predicted_labels,
labels=classes)
print report | dipanjanS/text-analytics-with-python | Old-First-Edition/Ch07_Semantic_and_Sentiment_Analysis/utils.py | Python | apache-2.0 | 3,381 |
# Copyright 2012 OpenStack Foundation.
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2017 FUJITSU LIMITED
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import inspect
import itertools
import logging
import re
import time
import urllib.parse as urlparse
import debtcollector.renames
from keystoneauth1 import exceptions as ksa_exc
import requests
from neutronclient._i18n import _
from neutronclient import client
from neutronclient.common import exceptions
from neutronclient.common import extension as client_extension
from neutronclient.common import serializer
from neutronclient.common import utils
_logger = logging.getLogger(__name__)
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
def exception_handler_v20(status_code, error_content):
"""Exception handler for API v2.0 client.
This routine generates the appropriate Neutron exception according to
the contents of the response body.
:param status_code: HTTP error status code
:param error_content: deserialized body of error response
"""
error_dict = None
request_ids = error_content.request_ids
if isinstance(error_content, dict):
error_dict = error_content.get('NeutronError')
# Find real error type
client_exc = None
if error_dict:
        # If the NeutronError key is found, it should contain
        # 'message' and 'type' keys.
try:
error_type = error_dict['type']
error_message = error_dict['message']
if error_dict['detail']:
error_message += "\n" + error_dict['detail']
# If corresponding exception is defined, use it.
client_exc = getattr(exceptions, '%sClient' % error_type, None)
except Exception:
error_message = "%s" % error_dict
else:
error_message = None
if isinstance(error_content, dict):
error_message = error_content.get('message')
if not error_message:
# If we end up here the exception was not a neutron error
error_message = "%s-%s" % (status_code, error_content)
# If an exception corresponding to the error type is not found,
# look up per status-code client exception.
if not client_exc:
client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
# If there is no exception per status-code,
# Use NeutronClientException as fallback.
if not client_exc:
client_exc = exceptions.NeutronClientException
raise client_exc(message=error_message,
status_code=status_code,
request_ids=request_ids)
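# Usage sketch: callers see the mapped exception type rather than a raw HTTP
# error, and both the message and the request ids survive:
#
#     try:
#         client.show_network('unknown-id')
#     except exceptions.NeutronClientException as e:
#         print(e.message, e.request_ids)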
class _RequestIdMixin(object):
"""Wrapper class to expose x-openstack-request-id to the caller."""
def _request_ids_setup(self):
self._request_ids = []
@property
def request_ids(self):
return self._request_ids
def _append_request_ids(self, resp):
"""Add request_ids as an attribute to the object
:param resp: Response object or list of Response objects
"""
if isinstance(resp, list):
# Add list of request_ids if response is of type list.
for resp_obj in resp:
self._append_request_id(resp_obj)
elif resp is not None:
# Add request_ids if response contains single object.
self._append_request_id(resp)
def _append_request_id(self, resp):
if isinstance(resp, requests.Response):
# Extract 'x-openstack-request-id' from headers if
# response is a Response object.
request_id = resp.headers.get('x-openstack-request-id')
else:
# If resp is of type string.
request_id = resp
if request_id:
self._request_ids.append(request_id)
class _DictWithMeta(dict, _RequestIdMixin):
def __init__(self, values, resp):
super(_DictWithMeta, self).__init__(values)
self._request_ids_setup()
self._append_request_ids(resp)
class _TupleWithMeta(tuple, _RequestIdMixin):
def __new__(cls, values, resp):
return super(_TupleWithMeta, cls).__new__(cls, values)
def __init__(self, values, resp):
self._request_ids_setup()
self._append_request_ids(resp)
class _StrWithMeta(str, _RequestIdMixin):
def __new__(cls, value, resp):
return super(_StrWithMeta, cls).__new__(cls, value)
def __init__(self, values, resp):
self._request_ids_setup()
self._append_request_ids(resp)
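# These thin subclasses let API results behave like ordinary dict/tuple/str
# values while still carrying the x-openstack-request-id header(s), e.g.:
#
#     nets = client.list_networks()
#     print(nets['networks'], nets.request_ids)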
class _GeneratorWithMeta(_RequestIdMixin):
def __init__(self, paginate_func, collection, path, **params):
self.paginate_func = paginate_func
self.collection = collection
self.path = path
self.params = params
self.generator = None
self._request_ids_setup()
def _paginate(self):
for r in self.paginate_func(
self.collection, self.path, **self.params):
yield r, r.request_ids
def __iter__(self):
return self
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
if not self.generator:
self.generator = self._paginate()
try:
obj, req_id = next(self.generator)
self._append_request_ids(req_id)
except StopIteration:
raise StopIteration()
return obj
class ClientBase(object):
"""Client for the OpenStack Neutron v2.0 API.
:param string username: Username for authentication. (optional)
:param string user_id: User ID for authentication. (optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string tenant_name: DEPRECATED! Use project_name instead.
:param string project_name: Project name. (optional)
:param string tenant_id: DEPRECATED! Use project_id instead.
:param string project_id: Project id. (optional)
:param string auth_strategy: 'keystone' by default, 'noauth' for no
authentication against keystone. (optional)
:param string auth_url: Keystone service endpoint for authorization.
:param string service_type: Network service type to pull from the
keystone catalog (e.g. 'network') (optional)
:param string endpoint_type: Network service endpoint type to pull from the
keystone catalog (e.g. 'publicURL',
'internalURL', or 'adminURL') (optional)
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:param string endpoint_url: A user-supplied endpoint URL for the neutron
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation.(optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
:param bool insecure: SSL certificate validation. (optional)
:param bool log_credentials: Allow for logging of passwords or not.
Defaults to False. (optional)
:param string ca_cert: SSL CA bundle file to use. (optional)
:param cert: A client certificate to pass to requests. These are of the
same form as requests expects. Either a single filename
containing both the certificate and key or a tuple containing
the path to the certificate then a path to the key. (optional)
:param integer retries: How many times idempotent (GET, PUT, DELETE)
requests to Neutron server should be retried if
they fail (default: 0).
:param bool raise_errors: If True then exceptions caused by connection
failure are propagated to the caller.
(default: True)
:param session: Keystone client auth session to use. (optional)
:param auth: Keystone auth plugin to use. (optional)
Example::
from neutronclient.v2_0 import client
neutron = client.Client(username=USER,
password=PASS,
project_name=PROJECT_NAME,
auth_url=KEYSTONE_URL)
nets = neutron.list_networks()
...
"""
# API has no way to report plurals, so we have to hard code them
# This variable should be overridden by a child class.
EXTED_PLURALS = {}
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def __init__(self, **kwargs):
"""Initialize a new client for the Neutron v2.0 API."""
super(ClientBase, self).__init__()
self.retries = kwargs.pop('retries', 0)
self.raise_errors = kwargs.pop('raise_errors', True)
self.httpclient = client.construct_http_client(**kwargs)
self.version = '2.0'
self.action_prefix = "/v%s" % (self.version)
self.retry_interval = 1
def _handle_fault_response(self, status_code, response_body, resp):
# Create exception with HTTP status code and message
_logger.debug("Error message: %s", response_body)
# Add deserialized error message to exception arguments
try:
des_error_body = self.deserialize(response_body, status_code)
except Exception:
# If unable to deserialized body it is probably not a
# Neutron error
des_error_body = {'message': response_body}
error_body = self._convert_into_with_meta(des_error_body, resp)
# Raise the appropriate exception
exception_handler_v20(status_code, error_body)
def do_request(self, method, action, body=None, headers=None, params=None):
# Add format and project_id
action = self.action_prefix + action
if isinstance(params, dict) and params:
params = utils.safe_encode_dict(params)
action += '?' + urlparse.urlencode(params, doseq=1)
if body:
body = self.serialize(body)
resp, replybody = self.httpclient.do_request(action, method, body=body,
headers=headers)
status_code = resp.status_code
if status_code in (requests.codes.ok,
requests.codes.created,
requests.codes.accepted,
requests.codes.no_content):
data = self.deserialize(replybody, status_code)
return self._convert_into_with_meta(data, resp)
else:
if not replybody:
replybody = resp.reason
self._handle_fault_response(status_code, replybody, resp)
def get_auth_info(self):
return self.httpclient.get_auth_info()
def serialize(self, data):
"""Serializes a dictionary into JSON.
A dictionary with a single key can be passed and it can contain any
structure.
"""
if data is None:
return None
elif isinstance(data, dict):
return serializer.Serializer().serialize(data)
else:
raise Exception(_("Unable to serialize object of type = '%s'") %
type(data))
def deserialize(self, data, status_code):
"""Deserializes a JSON string into a dictionary."""
if not data:
return data
return serializer.Serializer().deserialize(
data)['body']
def retry_request(self, method, action, body=None,
headers=None, params=None):
"""Call do_request with the default retry configuration.
Only idempotent requests should retry failed connection attempts.
:raises: ConnectionFailed if the maximum # of retries is exceeded
"""
max_attempts = self.retries + 1
for i in range(max_attempts):
try:
return self.do_request(method, action, body=body,
headers=headers, params=params)
except (exceptions.ConnectionFailed, ksa_exc.ConnectionError):
# Exception has already been logged by do_request()
if i < self.retries:
_logger.debug('Retrying connection to Neutron service')
time.sleep(self.retry_interval)
elif self.raise_errors:
raise
if self.retries:
msg = (_("Failed to connect to Neutron server after %d attempts")
% max_attempts)
else:
msg = _("Failed to connect Neutron server")
raise exceptions.ConnectionFailed(reason=msg)
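    # Illustrative retry configuration (a sketch; ``sess`` is a hypothetical
    # keystoneauth1 session object):
    #     neutron = client.Client(session=sess, retries=3)
    # GET/PUT/DELETE calls then survive up to three connection failures,
    # sleeping retry_interval seconds between attempts; POST never retries.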
def delete(self, action, body=None, headers=None, params=None):
return self.retry_request("DELETE", action, body=body,
headers=headers, params=params)
def get(self, action, body=None, headers=None, params=None):
return self.retry_request("GET", action, body=body,
headers=headers, params=params)
def post(self, action, body=None, headers=None, params=None):
# Do not retry POST requests to avoid the orphan objects problem.
return self.do_request("POST", action, body=body,
headers=headers, params=params)
def put(self, action, body=None, headers=None, params=None):
return self.retry_request("PUT", action, body=body,
headers=headers, params=params)
def list(self, collection, path, retrieve_all=True, **params):
if retrieve_all:
res = []
request_ids = []
for r in self._pagination(collection, path, **params):
res.extend(r[collection])
request_ids.extend(r.request_ids)
return _DictWithMeta({collection: res}, request_ids)
else:
return _GeneratorWithMeta(self._pagination, collection,
path, **params)
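    # Illustrative pagination (a sketch, assuming an authenticated
    # ``neutron`` client): with retrieve_all=False each yielded item is one
    # raw response page rather than the merged collection:
    #     for page in neutron.list_networks(retrieve_all=False, limit=100):
    #         process(page['networks'])  # ``process`` is hypothetical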
def _pagination(self, collection, path, **params):
if params.get('page_reverse', False):
linkrel = 'previous'
else:
linkrel = 'next'
        has_more = True
        while has_more:
            res = self.get(path, params=params)
            yield res
            has_more = False
            try:
                for link in res['%s_links' % collection]:
                    if link['rel'] == linkrel:
                        query_str = urlparse.urlparse(link['href']).query
                        params = urlparse.parse_qs(query_str)
                        has_more = True
                        break
except KeyError:
break
def _convert_into_with_meta(self, item, resp):
if item:
if isinstance(item, dict):
return _DictWithMeta(item, resp)
elif isinstance(item, str):
return _StrWithMeta(item, resp)
else:
return _TupleWithMeta((), resp)
def get_resource_plural(self, resource):
for k in self.EXTED_PLURALS:
if self.EXTED_PLURALS[k] == resource:
return k
return resource + 's'
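    # Example behaviour (using the EXTED_PLURALS mapping the Client subclass
    # defines below): get_resource_plural('network') falls through to the
    # default and returns 'networks', while get_resource_plural('policy')
    # hits the reverse lookup and returns 'policies' rather than the naive
    # 'policys'.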
def find_resource_by_id(self, resource, resource_id, cmd_resource=None,
parent_id=None, fields=None):
if not cmd_resource:
cmd_resource = resource
cmd_resource_plural = self.get_resource_plural(cmd_resource)
resource_plural = self.get_resource_plural(resource)
# TODO(amotoki): Use show_%s instead of list_%s
obj_lister = getattr(self, "list_%s" % cmd_resource_plural)
        # Perform the search by id only if resource_id looks like a valid UUID
match = re.match(UUID_PATTERN, resource_id)
collection = resource_plural
if match:
params = {'id': resource_id}
if fields:
params['fields'] = fields
if parent_id:
data = obj_lister(parent_id, **params)
else:
data = obj_lister(**params)
if data and data[collection]:
return data[collection][0]
not_found_message = (_("Unable to find %(resource)s with id "
"'%(id)s'") %
{'resource': resource, 'id': resource_id})
# 404 is raised by exceptions.NotFound to simulate serverside behavior
raise exceptions.NotFound(message=not_found_message)
def _find_resource_by_name(self, resource, name, project_id=None,
cmd_resource=None, parent_id=None, fields=None):
if not cmd_resource:
cmd_resource = resource
cmd_resource_plural = self.get_resource_plural(cmd_resource)
resource_plural = self.get_resource_plural(resource)
obj_lister = getattr(self, "list_%s" % cmd_resource_plural)
params = {'name': name}
if fields:
params['fields'] = fields
if project_id:
params['tenant_id'] = project_id
if parent_id:
data = obj_lister(parent_id, **params)
else:
data = obj_lister(**params)
collection = resource_plural
info = data[collection]
if len(info) > 1:
raise exceptions.NeutronClientNoUniqueMatch(resource=resource,
name=name)
elif len(info) == 0:
not_found_message = (_("Unable to find %(resource)s with name "
"'%(name)s'") %
{'resource': resource, 'name': name})
# 404 is raised by exceptions.NotFound
# to simulate serverside behavior
raise exceptions.NotFound(message=not_found_message)
else:
return info[0]
def find_resource(self, resource, name_or_id, project_id=None,
cmd_resource=None, parent_id=None, fields=None):
try:
return self.find_resource_by_id(resource, name_or_id,
cmd_resource, parent_id, fields)
except exceptions.NotFound:
try:
return self._find_resource_by_name(
resource, name_or_id, project_id,
cmd_resource, parent_id, fields)
except exceptions.NotFound:
not_found_message = (_("Unable to find %(resource)s with name "
"or id '%(name_or_id)s'") %
{'resource': resource,
'name_or_id': name_or_id})
raise exceptions.NotFound(
message=not_found_message)
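    # Illustrative lookup (a sketch, assuming an authenticated ``neutron``
    # client and a network named 'private'):
    #     net = neutron.find_resource('network', 'private')
    #     net['id']  # the resolved UUID
    # NotFound propagates if neither the id nor the name matches, and
    # NeutronClientNoUniqueMatch if several resources share the name.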
class Client(ClientBase):
networks_path = "/networks"
network_path = "/networks/%s"
ports_path = "/ports"
port_path = "/ports/%s"
port_bindings_path = "/ports/%s/bindings"
port_binding_path = "/ports/%s/bindings/%s"
port_binding_path_activate = "/ports/%s/bindings/%s/activate"
subnets_path = "/subnets"
subnet_path = "/subnets/%s"
onboard_network_subnets_path = "/subnetpools/%s/onboard_network_subnets"
subnetpools_path = "/subnetpools"
subnetpool_path = "/subnetpools/%s"
address_scopes_path = "/address-scopes"
address_scope_path = "/address-scopes/%s"
quotas_path = "/quotas"
quota_path = "/quotas/%s"
quota_default_path = "/quotas/%s/default"
quota_details_path = "/quotas/%s/details.json"
extensions_path = "/extensions"
extension_path = "/extensions/%s"
routers_path = "/routers"
router_path = "/routers/%s"
floatingips_path = "/floatingips"
floatingip_path = "/floatingips/%s"
security_groups_path = "/security-groups"
security_group_path = "/security-groups/%s"
security_group_rules_path = "/security-group-rules"
security_group_rule_path = "/security-group-rules/%s"
segments_path = "/segments"
segment_path = "/segments/%s"
sfc_flow_classifiers_path = "/sfc/flow_classifiers"
sfc_flow_classifier_path = "/sfc/flow_classifiers/%s"
sfc_port_pairs_path = "/sfc/port_pairs"
sfc_port_pair_path = "/sfc/port_pairs/%s"
sfc_port_pair_groups_path = "/sfc/port_pair_groups"
sfc_port_pair_group_path = "/sfc/port_pair_groups/%s"
sfc_port_chains_path = "/sfc/port_chains"
sfc_port_chain_path = "/sfc/port_chains/%s"
sfc_service_graphs_path = "/sfc/service_graphs"
sfc_service_graph_path = "/sfc/service_graphs/%s"
endpoint_groups_path = "/vpn/endpoint-groups"
endpoint_group_path = "/vpn/endpoint-groups/%s"
vpnservices_path = "/vpn/vpnservices"
vpnservice_path = "/vpn/vpnservices/%s"
ipsecpolicies_path = "/vpn/ipsecpolicies"
ipsecpolicy_path = "/vpn/ipsecpolicies/%s"
ikepolicies_path = "/vpn/ikepolicies"
ikepolicy_path = "/vpn/ikepolicies/%s"
ipsec_site_connections_path = "/vpn/ipsec-site-connections"
ipsec_site_connection_path = "/vpn/ipsec-site-connections/%s"
lbaas_loadbalancers_path = "/lbaas/loadbalancers"
lbaas_loadbalancer_path = "/lbaas/loadbalancers/%s"
lbaas_loadbalancer_path_stats = "/lbaas/loadbalancers/%s/stats"
lbaas_loadbalancer_path_status = "/lbaas/loadbalancers/%s/statuses"
lbaas_listeners_path = "/lbaas/listeners"
lbaas_listener_path = "/lbaas/listeners/%s"
lbaas_l7policies_path = "/lbaas/l7policies"
lbaas_l7policy_path = lbaas_l7policies_path + "/%s"
lbaas_l7rules_path = lbaas_l7policy_path + "/rules"
lbaas_l7rule_path = lbaas_l7rules_path + "/%s"
lbaas_pools_path = "/lbaas/pools"
lbaas_pool_path = "/lbaas/pools/%s"
lbaas_healthmonitors_path = "/lbaas/healthmonitors"
lbaas_healthmonitor_path = "/lbaas/healthmonitors/%s"
lbaas_members_path = lbaas_pool_path + "/members"
lbaas_member_path = lbaas_pool_path + "/members/%s"
vips_path = "/lb/vips"
vip_path = "/lb/vips/%s"
pools_path = "/lb/pools"
pool_path = "/lb/pools/%s"
pool_path_stats = "/lb/pools/%s/stats"
members_path = "/lb/members"
member_path = "/lb/members/%s"
health_monitors_path = "/lb/health_monitors"
health_monitor_path = "/lb/health_monitors/%s"
associate_pool_health_monitors_path = "/lb/pools/%s/health_monitors"
disassociate_pool_health_monitors_path = (
"/lb/pools/%(pool)s/health_monitors/%(health_monitor)s")
qos_queues_path = "/qos-queues"
qos_queue_path = "/qos-queues/%s"
agents_path = "/agents"
agent_path = "/agents/%s"
network_gateways_path = "/network-gateways"
network_gateway_path = "/network-gateways/%s"
gateway_devices_path = "/gateway-devices"
gateway_device_path = "/gateway-devices/%s"
service_providers_path = "/service-providers"
metering_labels_path = "/metering/metering-labels"
metering_label_path = "/metering/metering-labels/%s"
metering_label_rules_path = "/metering/metering-label-rules"
metering_label_rule_path = "/metering/metering-label-rules/%s"
DHCP_NETS = '/dhcp-networks'
DHCP_AGENTS = '/dhcp-agents'
L3_ROUTERS = '/l3-routers'
L3_AGENTS = '/l3-agents'
LOADBALANCER_POOLS = '/loadbalancer-pools'
LOADBALANCER_AGENT = '/loadbalancer-agent'
AGENT_LOADBALANCERS = '/agent-loadbalancers'
LOADBALANCER_HOSTING_AGENT = '/loadbalancer-hosting-agent'
firewall_rules_path = "/fw/firewall_rules"
firewall_rule_path = "/fw/firewall_rules/%s"
firewall_policies_path = "/fw/firewall_policies"
firewall_policy_path = "/fw/firewall_policies/%s"
firewall_policy_insert_path = "/fw/firewall_policies/%s/insert_rule"
firewall_policy_remove_path = "/fw/firewall_policies/%s/remove_rule"
firewalls_path = "/fw/firewalls"
firewall_path = "/fw/firewalls/%s"
fwaas_firewall_groups_path = "/fwaas/firewall_groups"
fwaas_firewall_group_path = "/fwaas/firewall_groups/%s"
fwaas_firewall_rules_path = "/fwaas/firewall_rules"
fwaas_firewall_rule_path = "/fwaas/firewall_rules/%s"
fwaas_firewall_policies_path = "/fwaas/firewall_policies"
fwaas_firewall_policy_path = "/fwaas/firewall_policies/%s"
fwaas_firewall_policy_insert_path = \
"/fwaas/firewall_policies/%s/insert_rule"
fwaas_firewall_policy_remove_path = \
"/fwaas/firewall_policies/%s/remove_rule"
rbac_policies_path = "/rbac-policies"
rbac_policy_path = "/rbac-policies/%s"
qos_policies_path = "/qos/policies"
qos_policy_path = "/qos/policies/%s"
qos_bandwidth_limit_rules_path = "/qos/policies/%s/bandwidth_limit_rules"
qos_bandwidth_limit_rule_path = "/qos/policies/%s/bandwidth_limit_rules/%s"
qos_packet_rate_limit_rules_path = \
"/qos/policies/%s/packet_rate_limit_rules"
qos_packet_rate_limit_rule_path = \
"/qos/policies/%s/packet_rate_limit_rules/%s"
qos_dscp_marking_rules_path = "/qos/policies/%s/dscp_marking_rules"
qos_dscp_marking_rule_path = "/qos/policies/%s/dscp_marking_rules/%s"
qos_minimum_bandwidth_rules_path = \
"/qos/policies/%s/minimum_bandwidth_rules"
qos_minimum_bandwidth_rule_path = \
"/qos/policies/%s/minimum_bandwidth_rules/%s"
qos_minimum_packet_rate_rules_path = \
"/qos/policies/%s/minimum_packet_rate_rules"
qos_minimum_packet_rate_rule_path = \
"/qos/policies/%s/minimum_packet_rate_rules/%s"
qos_rule_types_path = "/qos/rule-types"
qos_rule_type_path = "/qos/rule-types/%s"
flavors_path = "/flavors"
flavor_path = "/flavors/%s"
service_profiles_path = "/service_profiles"
service_profile_path = "/service_profiles/%s"
flavor_profile_bindings_path = flavor_path + service_profiles_path
flavor_profile_binding_path = flavor_path + service_profile_path
availability_zones_path = "/availability_zones"
auto_allocated_topology_path = "/auto-allocated-topology/%s"
BGP_DRINSTANCES = "/bgp-drinstances"
BGP_DRINSTANCE = "/bgp-drinstance/%s"
BGP_DRAGENTS = "/bgp-dragents"
BGP_DRAGENT = "/bgp-dragents/%s"
bgp_speakers_path = "/bgp-speakers"
bgp_speaker_path = "/bgp-speakers/%s"
bgp_peers_path = "/bgp-peers"
bgp_peer_path = "/bgp-peers/%s"
network_ip_availabilities_path = '/network-ip-availabilities'
network_ip_availability_path = '/network-ip-availabilities/%s'
tags_path = "/%s/%s/tags"
tag_path = "/%s/%s/tags/%s"
trunks_path = "/trunks"
trunk_path = "/trunks/%s"
subports_path = "/trunks/%s/get_subports"
subports_add_path = "/trunks/%s/add_subports"
subports_remove_path = "/trunks/%s/remove_subports"
bgpvpns_path = "/bgpvpn/bgpvpns"
bgpvpn_path = "/bgpvpn/bgpvpns/%s"
bgpvpn_network_associations_path =\
"/bgpvpn/bgpvpns/%s/network_associations"
bgpvpn_network_association_path =\
"/bgpvpn/bgpvpns/%s/network_associations/%s"
bgpvpn_router_associations_path = "/bgpvpn/bgpvpns/%s/router_associations"
bgpvpn_router_association_path =\
"/bgpvpn/bgpvpns/%s/router_associations/%s"
bgpvpn_port_associations_path = "/bgpvpn/bgpvpns/%s/port_associations"
bgpvpn_port_association_path = "/bgpvpn/bgpvpns/%s/port_associations/%s"
network_logs_path = "/log/logs"
network_log_path = "/log/logs/%s"
network_loggables_path = "/log/loggable-resources"
# API has no way to report plurals, so we have to hard code them
EXTED_PLURALS = {'routers': 'router',
'floatingips': 'floatingip',
'service_types': 'service_type',
'service_definitions': 'service_definition',
'security_groups': 'security_group',
'security_group_rules': 'security_group_rule',
'segments': 'segment',
'ipsecpolicies': 'ipsecpolicy',
'ikepolicies': 'ikepolicy',
'ipsec_site_connections': 'ipsec_site_connection',
'vpnservices': 'vpnservice',
'endpoint_groups': 'endpoint_group',
'vips': 'vip',
'pools': 'pool',
'members': 'member',
'health_monitors': 'health_monitor',
'quotas': 'quota',
'service_providers': 'service_provider',
'firewall_rules': 'firewall_rule',
'firewall_policies': 'firewall_policy',
'firewalls': 'firewall',
'fwaas_firewall_rules': 'fwaas_firewall_rule',
'fwaas_firewall_policies': 'fwaas_firewall_policy',
'fwaas_firewall_groups': 'fwaas_firewall_group',
'metering_labels': 'metering_label',
'metering_label_rules': 'metering_label_rule',
'loadbalancers': 'loadbalancer',
'listeners': 'listener',
'l7rules': 'l7rule',
'l7policies': 'l7policy',
'lbaas_l7policies': 'lbaas_l7policy',
'lbaas_pools': 'lbaas_pool',
'lbaas_healthmonitors': 'lbaas_healthmonitor',
'lbaas_members': 'lbaas_member',
'healthmonitors': 'healthmonitor',
'rbac_policies': 'rbac_policy',
'address_scopes': 'address_scope',
'qos_policies': 'qos_policy',
'policies': 'policy',
'bandwidth_limit_rules': 'bandwidth_limit_rule',
'packet_rate_limit_rules': 'packet_rate_limit_rule',
'minimum_bandwidth_rules': 'minimum_bandwidth_rule',
'minimum_packet_rate_rules': 'minimum_packet_rate_rule',
'rules': 'rule',
'dscp_marking_rules': 'dscp_marking_rule',
'rule_types': 'rule_type',
'flavors': 'flavor',
'bgp_speakers': 'bgp_speaker',
'bgp_peers': 'bgp_peer',
'network_ip_availabilities': 'network_ip_availability',
'trunks': 'trunk',
'bgpvpns': 'bgpvpn',
'network_associations': 'network_association',
'router_associations': 'router_association',
'port_associations': 'port_association',
'flow_classifiers': 'flow_classifier',
'port_pairs': 'port_pair',
'port_pair_groups': 'port_pair_group',
'port_chains': 'port_chain',
'service_graphs': 'service_graph',
'logs': 'log',
'loggable_resources': 'loggable_resource',
}
def list_ext(self, collection, path, retrieve_all, **_params):
"""Client extension hook for list."""
return self.list(collection, path, retrieve_all, **_params)
def show_ext(self, path, id, **_params):
"""Client extension hook for show."""
return self.get(path % id, params=_params)
def create_ext(self, path, body=None):
"""Client extension hook for create."""
return self.post(path, body=body)
def update_ext(self, path, id, body=None):
"""Client extension hook for update."""
return self.put(path % id, body=body)
def delete_ext(self, path, id):
"""Client extension hook for delete."""
return self.delete(path % id)
def get_quotas_tenant(self, **_params):
"""Fetch project info for following quota operation."""
return self.get(self.quota_path % 'tenant', params=_params)
def list_quotas(self, **_params):
"""Fetch all projects' quotas."""
return self.get(self.quotas_path, params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def show_quota(self, project_id, **_params):
"""Fetch information of a certain project's quotas."""
return self.get(self.quota_path % (project_id), params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def show_quota_details(self, project_id, **_params):
"""Fetch information of a certain project's quota details."""
return self.get(self.quota_details_path % (project_id),
params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def show_quota_default(self, project_id, **_params):
"""Fetch information of a certain project's default quotas."""
return self.get(self.quota_default_path % (project_id), params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def update_quota(self, project_id, body=None):
"""Update a project's quotas."""
return self.put(self.quota_path % (project_id), body=body)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def delete_quota(self, project_id):
"""Delete the specified project's quota values."""
return self.delete(self.quota_path % (project_id))
def list_extensions(self, **_params):
"""Fetch a list of all extensions on server side."""
return self.get(self.extensions_path, params=_params)
def show_extension(self, ext_alias, **_params):
"""Fetches information of a certain extension."""
return self.get(self.extension_path % ext_alias, params=_params)
def list_ports(self, retrieve_all=True, **_params):
"""Fetches a list of all ports for a project."""
# Pass filters in "params" argument to do_request
return self.list('ports', self.ports_path, retrieve_all,
**_params)
def show_port(self, port, **_params):
"""Fetches information of a certain port."""
return self.get(self.port_path % (port), params=_params)
def create_port(self, body=None):
"""Creates a new port."""
return self.post(self.ports_path, body=body)
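    # Illustrative request body (``net_id`` is a hypothetical network UUID):
    #     neutron.create_port(body={'port': {'network_id': net_id,
    #                                        'name': 'port1'}})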
def update_port(self, port, body=None, revision_number=None):
"""Updates a port."""
return self._update_resource(self.port_path % (port), body=body,
revision_number=revision_number)
def delete_port(self, port):
"""Deletes the specified port."""
return self.delete(self.port_path % (port))
def create_port_binding(self, port_id, body=None):
"""Creates a new port binding."""
return self.post(self.port_bindings_path % port_id, body=body)
def delete_port_binding(self, port_id, host_id):
"""Deletes the specified port binding."""
return self.delete(self.port_binding_path % (port_id, host_id))
def show_port_binding(self, port_id, host_id, **_params):
"""Fetches information for a certain port binding."""
return self.get(self.port_binding_path % (port_id, host_id),
params=_params)
def list_port_bindings(self, port_id, retrieve_all=True, **_params):
"""Fetches a list of all bindings for a certain port."""
return self.list('port_bindings', self.port_bindings_path % port_id,
retrieve_all, **_params)
def activate_port_binding(self, port_id, host_id):
"""Activates a port binding."""
return self.put(self.port_binding_path_activate % (port_id, host_id))
def list_networks(self, retrieve_all=True, **_params):
"""Fetches a list of all networks for a project."""
# Pass filters in "params" argument to do_request
return self.list('networks', self.networks_path, retrieve_all,
**_params)
def show_network(self, network, **_params):
"""Fetches information of a certain network."""
return self.get(self.network_path % (network), params=_params)
def create_network(self, body=None):
"""Creates a new network."""
return self.post(self.networks_path, body=body)
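    # Illustrative request body, following the standard Neutron envelope of
    # a singular top-level key:
    #     neutron.create_network(body={'network': {'name': 'net1',
    #                                              'admin_state_up': True}})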
def update_network(self, network, body=None, revision_number=None):
"""Updates a network."""
return self._update_resource(self.network_path % (network), body=body,
revision_number=revision_number)
def delete_network(self, network):
"""Deletes the specified network."""
return self.delete(self.network_path % (network))
def list_subnets(self, retrieve_all=True, **_params):
"""Fetches a list of all subnets for a project."""
return self.list('subnets', self.subnets_path, retrieve_all,
**_params)
def show_subnet(self, subnet, **_params):
"""Fetches information of a certain subnet."""
return self.get(self.subnet_path % (subnet), params=_params)
def create_subnet(self, body=None):
"""Creates a new subnet."""
return self.post(self.subnets_path, body=body)
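    # Illustrative request body (``net_id`` is a hypothetical network UUID):
    #     neutron.create_subnet(body={'subnet': {'network_id': net_id,
    #                                            'ip_version': 4,
    #                                            'cidr': '10.0.0.0/24'}})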
def update_subnet(self, subnet, body=None, revision_number=None):
"""Updates a subnet."""
return self._update_resource(self.subnet_path % (subnet), body=body,
revision_number=revision_number)
def delete_subnet(self, subnet):
"""Deletes the specified subnet."""
return self.delete(self.subnet_path % (subnet))
def list_subnetpools(self, retrieve_all=True, **_params):
"""Fetches a list of all subnetpools for a project."""
return self.list('subnetpools', self.subnetpools_path, retrieve_all,
**_params)
def show_subnetpool(self, subnetpool, **_params):
"""Fetches information of a certain subnetpool."""
return self.get(self.subnetpool_path % (subnetpool), params=_params)
def create_subnetpool(self, body=None):
"""Creates a new subnetpool."""
return self.post(self.subnetpools_path, body=body)
def update_subnetpool(self, subnetpool, body=None, revision_number=None):
"""Updates a subnetpool."""
return self._update_resource(self.subnetpool_path % (subnetpool),
body=body,
revision_number=revision_number)
def delete_subnetpool(self, subnetpool):
"""Deletes the specified subnetpool."""
return self.delete(self.subnetpool_path % (subnetpool))
def list_routers(self, retrieve_all=True, **_params):
"""Fetches a list of all routers for a project."""
# Pass filters in "params" argument to do_request
return self.list('routers', self.routers_path, retrieve_all,
**_params)
def show_router(self, router, **_params):
"""Fetches information of a certain router."""
return self.get(self.router_path % (router), params=_params)
def create_router(self, body=None):
"""Creates a new router."""
return self.post(self.routers_path, body=body)
def update_router(self, router, body=None, revision_number=None):
"""Updates a router."""
return self._update_resource(self.router_path % (router), body=body,
revision_number=revision_number)
def delete_router(self, router):
"""Deletes the specified router."""
return self.delete(self.router_path % (router))
def list_address_scopes(self, retrieve_all=True, **_params):
"""Fetches a list of all address scopes for a project."""
return self.list('address_scopes', self.address_scopes_path,
retrieve_all, **_params)
def show_address_scope(self, address_scope, **_params):
"""Fetches information of a certain address scope."""
return self.get(self.address_scope_path % (address_scope),
params=_params)
def create_address_scope(self, body=None):
"""Creates a new address scope."""
return self.post(self.address_scopes_path, body=body)
def update_address_scope(self, address_scope, body=None):
"""Updates a address scope."""
return self.put(self.address_scope_path % (address_scope), body=body)
def delete_address_scope(self, address_scope):
"""Deletes the specified address scope."""
return self.delete(self.address_scope_path % (address_scope))
def add_interface_router(self, router, body=None):
"""Adds an internal network interface to the specified router."""
return self.put((self.router_path % router) + "/add_router_interface",
body=body)
def remove_interface_router(self, router, body=None):
"""Removes an internal network interface from the specified router."""
return self.put((self.router_path % router) +
"/remove_router_interface", body=body)
def add_extra_routes_to_router(self, router, body=None):
"""Adds extra routes to the specified router."""
return self.put((self.router_path % router) + "/add_extraroutes",
body=body)
def remove_extra_routes_from_router(self, router, body=None):
"""Removes extra routes from the specified router."""
return self.put((self.router_path % router) + "/remove_extraroutes",
body=body)
def add_gateway_router(self, router, body=None):
"""Adds an external network gateway to the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': body}})
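    # Note the asymmetry: unlike the create_* calls, the caller passes only
    # the external_gateway_info payload and this method builds the router
    # envelope itself, e.g. (``router_id`` and ``ext_net_id`` being
    # hypothetical UUIDs):
    #     neutron.add_gateway_router(router_id, {'network_id': ext_net_id})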
def remove_gateway_router(self, router):
"""Removes an external network gateway from the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': {}}})
def list_floatingips(self, retrieve_all=True, **_params):
"""Fetches a list of all floatingips for a project."""
# Pass filters in "params" argument to do_request
return self.list('floatingips', self.floatingips_path, retrieve_all,
**_params)
def show_floatingip(self, floatingip, **_params):
"""Fetches information of a certain floatingip."""
return self.get(self.floatingip_path % (floatingip), params=_params)
def create_floatingip(self, body=None):
"""Creates a new floatingip."""
return self.post(self.floatingips_path, body=body)
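    # Illustrative request body (``ext_net_id`` is a hypothetical external
    # network UUID):
    #     neutron.create_floatingip(
    #         body={'floatingip': {'floating_network_id': ext_net_id}})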
def update_floatingip(self, floatingip, body=None, revision_number=None):
"""Updates a floatingip."""
return self._update_resource(self.floatingip_path % (floatingip),
body=body,
revision_number=revision_number)
def delete_floatingip(self, floatingip):
"""Deletes the specified floatingip."""
return self.delete(self.floatingip_path % (floatingip))
def create_security_group(self, body=None):
"""Creates a new security group."""
return self.post(self.security_groups_path, body=body)
def update_security_group(self, security_group, body=None,
revision_number=None):
"""Updates a security group."""
return self._update_resource(self.security_group_path %
security_group, body=body,
revision_number=revision_number)
def list_security_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all security groups for a project."""
return self.list('security_groups', self.security_groups_path,
retrieve_all, **_params)
def show_security_group(self, security_group, **_params):
"""Fetches information of a certain security group."""
return self.get(self.security_group_path % (security_group),
params=_params)
def delete_security_group(self, security_group):
"""Deletes the specified security group."""
return self.delete(self.security_group_path % (security_group))
def create_security_group_rule(self, body=None):
"""Creates a new security group rule."""
return self.post(self.security_group_rules_path, body=body)
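    # Illustrative request body (``sg_id`` is a hypothetical security group
    # UUID) opening TCP/22 for ingress:
    #     neutron.create_security_group_rule(
    #         body={'security_group_rule': {'security_group_id': sg_id,
    #                                       'direction': 'ingress',
    #                                       'protocol': 'tcp',
    #                                       'port_range_min': 22,
    #                                       'port_range_max': 22}})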
def delete_security_group_rule(self, security_group_rule):
"""Deletes the specified security group rule."""
return self.delete(self.security_group_rule_path %
(security_group_rule))
def list_security_group_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all security group rules for a project."""
return self.list('security_group_rules',
self.security_group_rules_path,
retrieve_all, **_params)
def show_security_group_rule(self, security_group_rule, **_params):
"""Fetches information of a certain security group rule."""
return self.get(self.security_group_rule_path % (security_group_rule),
params=_params)
def create_segment(self, body=None):
"""Creates a new segment."""
return self.post(self.segments_path, body=body)
def update_segment(self, segment, body=None, revision_number=None):
"""Updates a segment."""
return self._update_resource(self.segment_path % segment, body=body,
revision_number=revision_number)
def list_segments(self, retrieve_all=True, **_params):
"""Fetches a list of all segments for a project."""
return self.list('segments', self.segments_path, retrieve_all,
**_params)
def show_segment(self, segment, **_params):
"""Fetches information of a certain segment."""
return self.get(self.segment_path % segment, params=_params)
def delete_segment(self, segment):
"""Deletes the specified segment."""
return self.delete(self.segment_path % segment)
def list_endpoint_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all VPN endpoint groups for a project."""
return self.list('endpoint_groups', self.endpoint_groups_path,
retrieve_all, **_params)
def show_endpoint_group(self, endpointgroup, **_params):
"""Fetches information for a specific VPN endpoint group."""
return self.get(self.endpoint_group_path % endpointgroup,
params=_params)
def create_endpoint_group(self, body=None):
"""Creates a new VPN endpoint group."""
return self.post(self.endpoint_groups_path, body=body)
def update_endpoint_group(self, endpoint_group, body=None):
"""Updates a VPN endpoint group."""
return self.put(self.endpoint_group_path % endpoint_group, body=body)
def delete_endpoint_group(self, endpoint_group):
"""Deletes the specified VPN endpoint group."""
return self.delete(self.endpoint_group_path % endpoint_group)
def list_vpnservices(self, retrieve_all=True, **_params):
"""Fetches a list of all configured VPN services for a project."""
return self.list('vpnservices', self.vpnservices_path, retrieve_all,
**_params)
def show_vpnservice(self, vpnservice, **_params):
"""Fetches information of a specific VPN service."""
return self.get(self.vpnservice_path % (vpnservice), params=_params)
def create_vpnservice(self, body=None):
"""Creates a new VPN service."""
return self.post(self.vpnservices_path, body=body)
def update_vpnservice(self, vpnservice, body=None):
"""Updates a VPN service."""
return self.put(self.vpnservice_path % (vpnservice), body=body)
def delete_vpnservice(self, vpnservice):
"""Deletes the specified VPN service."""
return self.delete(self.vpnservice_path % (vpnservice))
def list_ipsec_site_connections(self, retrieve_all=True, **_params):
"""Fetches all configured IPsecSiteConnections for a project."""
return self.list('ipsec_site_connections',
self.ipsec_site_connections_path,
retrieve_all,
**_params)
def show_ipsec_site_connection(self, ipsecsite_conn, **_params):
"""Fetches information of a specific IPsecSiteConnection."""
return self.get(
self.ipsec_site_connection_path % (ipsecsite_conn), params=_params
)
def create_ipsec_site_connection(self, body=None):
"""Creates a new IPsecSiteConnection."""
return self.post(self.ipsec_site_connections_path, body=body)
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
"""Updates an IPsecSiteConnection."""
return self.put(
self.ipsec_site_connection_path % (ipsecsite_conn), body=body
)
def delete_ipsec_site_connection(self, ipsecsite_conn):
"""Deletes the specified IPsecSiteConnection."""
return self.delete(self.ipsec_site_connection_path % (ipsecsite_conn))
def list_ikepolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IKEPolicies for a project."""
return self.list('ikepolicies', self.ikepolicies_path, retrieve_all,
**_params)
def show_ikepolicy(self, ikepolicy, **_params):
"""Fetches information of a specific IKEPolicy."""
return self.get(self.ikepolicy_path % (ikepolicy), params=_params)
def create_ikepolicy(self, body=None):
"""Creates a new IKEPolicy."""
return self.post(self.ikepolicies_path, body=body)
def update_ikepolicy(self, ikepolicy, body=None):
"""Updates an IKEPolicy."""
return self.put(self.ikepolicy_path % (ikepolicy), body=body)
def delete_ikepolicy(self, ikepolicy):
"""Deletes the specified IKEPolicy."""
return self.delete(self.ikepolicy_path % (ikepolicy))
def list_ipsecpolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IPsecPolicies for a project."""
return self.list('ipsecpolicies',
self.ipsecpolicies_path,
retrieve_all,
**_params)
def show_ipsecpolicy(self, ipsecpolicy, **_params):
"""Fetches information of a specific IPsecPolicy."""
return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params)
def create_ipsecpolicy(self, body=None):
"""Creates a new IPsecPolicy."""
return self.post(self.ipsecpolicies_path, body=body)
def update_ipsecpolicy(self, ipsecpolicy, body=None):
"""Updates an IPsecPolicy."""
return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body)
def delete_ipsecpolicy(self, ipsecpolicy):
"""Deletes the specified IPsecPolicy."""
return self.delete(self.ipsecpolicy_path % (ipsecpolicy))
def list_loadbalancers(self, retrieve_all=True, **_params):
"""Fetches a list of all loadbalancers for a project."""
return self.list('loadbalancers', self.lbaas_loadbalancers_path,
retrieve_all, **_params)
def show_loadbalancer(self, lbaas_loadbalancer, **_params):
"""Fetches information for a load balancer."""
return self.get(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
params=_params)
def create_loadbalancer(self, body=None):
"""Creates a new load balancer."""
return self.post(self.lbaas_loadbalancers_path, body=body)
def update_loadbalancer(self, lbaas_loadbalancer, body=None):
"""Updates a load balancer."""
return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
body=body)
def delete_loadbalancer(self, lbaas_loadbalancer):
"""Deletes the specified load balancer."""
return self.delete(self.lbaas_loadbalancer_path %
(lbaas_loadbalancer))
def retrieve_loadbalancer_stats(self, loadbalancer, **_params):
"""Retrieves stats for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_stats % (loadbalancer),
params=_params)
def retrieve_loadbalancer_status(self, loadbalancer, **_params):
"""Retrieves status for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_status % (loadbalancer),
params=_params)
def list_listeners(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_listeners for a project."""
return self.list('listeners', self.lbaas_listeners_path,
retrieve_all, **_params)
def show_listener(self, lbaas_listener, **_params):
"""Fetches information for a lbaas_listener."""
return self.get(self.lbaas_listener_path % (lbaas_listener),
params=_params)
def create_listener(self, body=None):
"""Creates a new lbaas_listener."""
return self.post(self.lbaas_listeners_path, body=body)
def update_listener(self, lbaas_listener, body=None):
"""Updates a lbaas_listener."""
return self.put(self.lbaas_listener_path % (lbaas_listener),
body=body)
def delete_listener(self, lbaas_listener):
"""Deletes the specified lbaas_listener."""
return self.delete(self.lbaas_listener_path % (lbaas_listener))
def list_lbaas_l7policies(self, retrieve_all=True, **_params):
"""Fetches a list of all L7 policies for a listener."""
return self.list('l7policies', self.lbaas_l7policies_path,
retrieve_all, **_params)
def show_lbaas_l7policy(self, l7policy, **_params):
"""Fetches information of a certain listener's L7 policy."""
return self.get(self.lbaas_l7policy_path % l7policy,
params=_params)
def create_lbaas_l7policy(self, body=None):
"""Creates L7 policy for a certain listener."""
return self.post(self.lbaas_l7policies_path, body=body)
def update_lbaas_l7policy(self, l7policy, body=None):
"""Updates L7 policy."""
return self.put(self.lbaas_l7policy_path % l7policy,
body=body)
def delete_lbaas_l7policy(self, l7policy):
"""Deletes the specified L7 policy."""
return self.delete(self.lbaas_l7policy_path % l7policy)
def list_lbaas_l7rules(self, l7policy, retrieve_all=True, **_params):
"""Fetches a list of all rules for L7 policy."""
return self.list('rules', self.lbaas_l7rules_path % l7policy,
retrieve_all, **_params)
def show_lbaas_l7rule(self, l7rule, l7policy, **_params):
"""Fetches information of a certain L7 policy's rule."""
return self.get(self.lbaas_l7rule_path % (l7policy, l7rule),
params=_params)
def create_lbaas_l7rule(self, l7policy, body=None):
"""Creates rule for a certain L7 policy."""
return self.post(self.lbaas_l7rules_path % l7policy, body=body)
def update_lbaas_l7rule(self, l7rule, l7policy, body=None):
"""Updates L7 rule."""
return self.put(self.lbaas_l7rule_path % (l7policy, l7rule),
body=body)
def delete_lbaas_l7rule(self, l7rule, l7policy):
"""Deletes the specified L7 rule."""
return self.delete(self.lbaas_l7rule_path % (l7policy, l7rule))
def list_lbaas_pools(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_pools for a project."""
return self.list('pools', self.lbaas_pools_path,
retrieve_all, **_params)
def show_lbaas_pool(self, lbaas_pool, **_params):
"""Fetches information for a lbaas_pool."""
return self.get(self.lbaas_pool_path % (lbaas_pool),
params=_params)
def create_lbaas_pool(self, body=None):
"""Creates a new lbaas_pool."""
return self.post(self.lbaas_pools_path, body=body)
def update_lbaas_pool(self, lbaas_pool, body=None):
"""Updates a lbaas_pool."""
return self.put(self.lbaas_pool_path % (lbaas_pool),
body=body)
def delete_lbaas_pool(self, lbaas_pool):
"""Deletes the specified lbaas_pool."""
return self.delete(self.lbaas_pool_path % (lbaas_pool))
def list_lbaas_healthmonitors(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_healthmonitors for a project."""
return self.list('healthmonitors', self.lbaas_healthmonitors_path,
retrieve_all, **_params)
def show_lbaas_healthmonitor(self, lbaas_healthmonitor, **_params):
"""Fetches information for a lbaas_healthmonitor."""
return self.get(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
params=_params)
def create_lbaas_healthmonitor(self, body=None):
"""Creates a new lbaas_healthmonitor."""
return self.post(self.lbaas_healthmonitors_path, body=body)
def update_lbaas_healthmonitor(self, lbaas_healthmonitor, body=None):
"""Updates a lbaas_healthmonitor."""
return self.put(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
body=body)
def delete_lbaas_healthmonitor(self, lbaas_healthmonitor):
"""Deletes the specified lbaas_healthmonitor."""
return self.delete(self.lbaas_healthmonitor_path %
(lbaas_healthmonitor))
def list_lbaas_loadbalancers(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_loadbalancers for a project."""
return self.list('loadbalancers', self.lbaas_loadbalancers_path,
retrieve_all, **_params)
def list_lbaas_members(self, lbaas_pool, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_members for a project."""
return self.list('members', self.lbaas_members_path % lbaas_pool,
retrieve_all, **_params)
def show_lbaas_member(self, lbaas_member, lbaas_pool, **_params):
"""Fetches information of a certain lbaas_member."""
return self.get(self.lbaas_member_path % (lbaas_pool, lbaas_member),
params=_params)
def create_lbaas_member(self, lbaas_pool, body=None):
"""Creates a lbaas_member."""
return self.post(self.lbaas_members_path % lbaas_pool, body=body)
def update_lbaas_member(self, lbaas_member, lbaas_pool, body=None):
"""Updates a lbaas_member."""
return self.put(self.lbaas_member_path % (lbaas_pool, lbaas_member),
body=body)
def delete_lbaas_member(self, lbaas_member, lbaas_pool):
"""Deletes the specified lbaas_member."""
return self.delete(self.lbaas_member_path % (lbaas_pool, lbaas_member))
def list_vips(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer vips for a project."""
# Pass filters in "params" argument to do_request
return self.list('vips', self.vips_path, retrieve_all,
**_params)
def show_vip(self, vip, **_params):
"""Fetches information of a certain load balancer vip."""
return self.get(self.vip_path % (vip), params=_params)
def create_vip(self, body=None):
"""Creates a new load balancer vip."""
return self.post(self.vips_path, body=body)
def update_vip(self, vip, body=None):
"""Updates a load balancer vip."""
return self.put(self.vip_path % (vip), body=body)
def delete_vip(self, vip):
"""Deletes the specified load balancer vip."""
return self.delete(self.vip_path % (vip))
def list_pools(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer pools for a project."""
# Pass filters in "params" argument to do_request
return self.list('pools', self.pools_path, retrieve_all,
**_params)
def show_pool(self, pool, **_params):
"""Fetches information of a certain load balancer pool."""
return self.get(self.pool_path % (pool), params=_params)
def create_pool(self, body=None):
"""Creates a new load balancer pool."""
return self.post(self.pools_path, body=body)
def update_pool(self, pool, body=None):
"""Updates a load balancer pool."""
return self.put(self.pool_path % (pool), body=body)
def delete_pool(self, pool):
"""Deletes the specified load balancer pool."""
return self.delete(self.pool_path % (pool))
def retrieve_pool_stats(self, pool, **_params):
"""Retrieves stats for a certain load balancer pool."""
return self.get(self.pool_path_stats % (pool), params=_params)
def list_members(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer members for a project."""
# Pass filters in "params" argument to do_request
return self.list('members', self.members_path, retrieve_all,
**_params)
def show_member(self, member, **_params):
"""Fetches information of a certain load balancer member."""
return self.get(self.member_path % (member), params=_params)
def create_member(self, body=None):
"""Creates a new load balancer member."""
return self.post(self.members_path, body=body)
def update_member(self, member, body=None):
"""Updates a load balancer member."""
return self.put(self.member_path % (member), body=body)
def delete_member(self, member):
"""Deletes the specified load balancer member."""
return self.delete(self.member_path % (member))
def list_health_monitors(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer health monitors for a project.
"""
# Pass filters in "params" argument to do_request
return self.list('health_monitors', self.health_monitors_path,
retrieve_all, **_params)
def show_health_monitor(self, health_monitor, **_params):
"""Fetches information of a certain load balancer health monitor."""
return self.get(self.health_monitor_path % (health_monitor),
params=_params)
def create_health_monitor(self, body=None):
"""Creates a new load balancer health monitor."""
return self.post(self.health_monitors_path, body=body)
def update_health_monitor(self, health_monitor, body=None):
"""Updates a load balancer health monitor."""
return self.put(self.health_monitor_path % (health_monitor), body=body)
def delete_health_monitor(self, health_monitor):
"""Deletes the specified load balancer health monitor."""
return self.delete(self.health_monitor_path % (health_monitor))
def associate_health_monitor(self, pool, body):
"""Associate specified load balancer health monitor and pool."""
return self.post(self.associate_pool_health_monitors_path % (pool),
body=body)
def disassociate_health_monitor(self, pool, health_monitor):
"""Disassociate specified load balancer health monitor and pool."""
path = (self.disassociate_pool_health_monitors_path %
{'pool': pool, 'health_monitor': health_monitor})
return self.delete(path)
def create_qos_queue(self, body=None):
"""Creates a new queue."""
return self.post(self.qos_queues_path, body=body)
def list_qos_queues(self, **_params):
"""Fetches a list of all queues for a project."""
return self.get(self.qos_queues_path, params=_params)
def show_qos_queue(self, queue, **_params):
"""Fetches information of a certain queue."""
return self.get(self.qos_queue_path % (queue),
params=_params)
def delete_qos_queue(self, queue):
"""Deletes the specified queue."""
return self.delete(self.qos_queue_path % (queue))
def list_agents(self, **_params):
"""Fetches agents."""
# Pass filters in "params" argument to do_request
return self.get(self.agents_path, params=_params)
def show_agent(self, agent, **_params):
"""Fetches information of a certain agent."""
return self.get(self.agent_path % (agent), params=_params)
def update_agent(self, agent, body=None):
"""Updates an agent."""
return self.put(self.agent_path % (agent), body=body)
def delete_agent(self, agent):
"""Deletes the specified agent."""
return self.delete(self.agent_path % (agent))
def list_network_gateways(self, **_params):
"""Retrieve network gateways."""
return self.get(self.network_gateways_path, params=_params)
def show_network_gateway(self, gateway_id, **_params):
"""Fetch a network gateway."""
return self.get(self.network_gateway_path % gateway_id, params=_params)
def create_network_gateway(self, body=None):
"""Create a new network gateway."""
return self.post(self.network_gateways_path, body=body)
def update_network_gateway(self, gateway_id, body=None):
"""Update a network gateway."""
return self.put(self.network_gateway_path % gateway_id, body=body)
def delete_network_gateway(self, gateway_id):
"""Delete the specified network gateway."""
return self.delete(self.network_gateway_path % gateway_id)
def connect_network_gateway(self, gateway_id, body=None):
"""Connect a network gateway to the specified network."""
base_uri = self.network_gateway_path % gateway_id
return self.put("%s/connect_network" % base_uri, body=body)
def disconnect_network_gateway(self, gateway_id, body=None):
"""Disconnect a network from the specified gateway."""
base_uri = self.network_gateway_path % gateway_id
return self.put("%s/disconnect_network" % base_uri, body=body)
def list_gateway_devices(self, **_params):
"""Retrieve gateway devices."""
return self.get(self.gateway_devices_path, params=_params)
def show_gateway_device(self, gateway_device_id, **_params):
"""Fetch a gateway device."""
return self.get(self.gateway_device_path % gateway_device_id,
params=_params)
def create_gateway_device(self, body=None):
"""Create a new gateway device."""
return self.post(self.gateway_devices_path, body=body)
def update_gateway_device(self, gateway_device_id, body=None):
"""Updates a new gateway device."""
return self.put(self.gateway_device_path % gateway_device_id,
body=body)
def delete_gateway_device(self, gateway_device_id):
"""Delete the specified gateway device."""
return self.delete(self.gateway_device_path % gateway_device_id)
def list_dhcp_agent_hosting_networks(self, network, **_params):
"""Fetches a list of dhcp agents hosting a network."""
return self.get((self.network_path + self.DHCP_AGENTS) % network,
params=_params)
def list_networks_on_dhcp_agent(self, dhcp_agent, **_params):
"""Fetches a list of networks hosted on a DHCP agent."""
return self.get((self.agent_path + self.DHCP_NETS) % dhcp_agent,
params=_params)
def add_network_to_dhcp_agent(self, dhcp_agent, body=None):
"""Adds a network to dhcp agent."""
return self.post((self.agent_path + self.DHCP_NETS) % dhcp_agent,
body=body)
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id):
"""Remove a network from dhcp agent."""
return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % (
dhcp_agent, network_id))
def list_l3_agent_hosting_routers(self, router, **_params):
"""Fetches a list of L3 agents hosting a router."""
return self.get((self.router_path + self.L3_AGENTS) % router,
params=_params)
def list_routers_on_l3_agent(self, l3_agent, **_params):
"""Fetches a list of routers hosted on an L3 agent."""
return self.get((self.agent_path + self.L3_ROUTERS) % l3_agent,
params=_params)
def add_router_to_l3_agent(self, l3_agent, body):
"""Adds a router to L3 agent."""
return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent,
body=body)
def list_dragents_hosting_bgp_speaker(self, bgp_speaker, **_params):
"""Fetches a list of Dynamic Routing agents hosting a BGP speaker."""
return self.get((self.bgp_speaker_path + self.BGP_DRAGENTS)
% bgp_speaker, params=_params)
def add_bgp_speaker_to_dragent(self, bgp_dragent, body):
"""Adds a BGP speaker to Dynamic Routing agent."""
return self.post((self.agent_path + self.BGP_DRINSTANCES)
% bgp_dragent, body=body)
def remove_bgp_speaker_from_dragent(self, bgp_dragent, bgpspeaker_id):
"""Removes a BGP speaker from Dynamic Routing agent."""
return self.delete((self.agent_path + self.BGP_DRINSTANCES + "/%s")
% (bgp_dragent, bgpspeaker_id))
def list_bgp_speaker_on_dragent(self, bgp_dragent, **_params):
"""Fetches a list of BGP speakers hosted by Dynamic Routing agent."""
return self.get((self.agent_path + self.BGP_DRINSTANCES)
% bgp_dragent, params=_params)
def list_firewall_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall rules for a project."""
# Pass filters in "params" argument to do_request
return self.list('firewall_rules', self.firewall_rules_path,
retrieve_all, **_params)
def show_firewall_rule(self, firewall_rule, **_params):
"""Fetches information of a certain firewall rule."""
return self.get(self.firewall_rule_path % (firewall_rule),
params=_params)
def create_firewall_rule(self, body=None):
"""Creates a new firewall rule."""
return self.post(self.firewall_rules_path, body=body)
def update_firewall_rule(self, firewall_rule, body=None):
"""Updates a firewall rule."""
return self.put(self.firewall_rule_path % (firewall_rule), body=body)
def delete_firewall_rule(self, firewall_rule):
"""Deletes the specified firewall rule."""
return self.delete(self.firewall_rule_path % (firewall_rule))
def list_firewall_policies(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall policies for a project."""
# Pass filters in "params" argument to do_request
return self.list('firewall_policies', self.firewall_policies_path,
retrieve_all, **_params)
def show_firewall_policy(self, firewall_policy, **_params):
"""Fetches information of a certain firewall policy."""
return self.get(self.firewall_policy_path % (firewall_policy),
params=_params)
def create_firewall_policy(self, body=None):
"""Creates a new firewall policy."""
return self.post(self.firewall_policies_path, body=body)
def update_firewall_policy(self, firewall_policy, body=None):
"""Updates a firewall policy."""
return self.put(self.firewall_policy_path % (firewall_policy),
body=body)
def delete_firewall_policy(self, firewall_policy):
"""Deletes the specified firewall policy."""
return self.delete(self.firewall_policy_path % (firewall_policy))
def firewall_policy_insert_rule(self, firewall_policy, body=None):
"""Inserts specified rule into firewall policy."""
return self.put(self.firewall_policy_insert_path % (firewall_policy),
body=body)
def firewall_policy_remove_rule(self, firewall_policy, body=None):
"""Removes specified rule from firewall policy."""
return self.put(self.firewall_policy_remove_path % (firewall_policy),
body=body)
def list_firewalls(self, retrieve_all=True, **_params):
"""Fetches a list of all firewalls for a project."""
# Pass filters in "params" argument to do_request
return self.list('firewalls', self.firewalls_path, retrieve_all,
**_params)
def show_firewall(self, firewall, **_params):
"""Fetches information of a certain firewall."""
return self.get(self.firewall_path % (firewall), params=_params)
def create_firewall(self, body=None):
"""Creates a new firewall."""
return self.post(self.firewalls_path, body=body)
def update_firewall(self, firewall, body=None):
"""Updates a firewall."""
return self.put(self.firewall_path % (firewall), body=body)
def delete_firewall(self, firewall):
"""Deletes the specified firewall."""
return self.delete(self.firewall_path % (firewall))
def list_fwaas_firewall_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall groups for a project"""
return self.list('firewall_groups', self.fwaas_firewall_groups_path,
retrieve_all, **_params)
def show_fwaas_firewall_group(self, fwg, **_params):
"""Fetches information of a certain firewall group"""
return self.get(self.fwaas_firewall_group_path % (fwg), params=_params)
def create_fwaas_firewall_group(self, body=None):
"""Creates a new firewall group"""
return self.post(self.fwaas_firewall_groups_path, body=body)
def update_fwaas_firewall_group(self, fwg, body=None):
"""Updates a firewall group"""
return self.put(self.fwaas_firewall_group_path % (fwg), body=body)
def delete_fwaas_firewall_group(self, fwg):
"""Deletes the specified firewall group"""
return self.delete(self.fwaas_firewall_group_path % (fwg))
def list_fwaas_firewall_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall rules for a project"""
# Pass filters in "params" argument to do_request
return self.list('firewall_rules', self.fwaas_firewall_rules_path,
retrieve_all, **_params)
def show_fwaas_firewall_rule(self, firewall_rule, **_params):
"""Fetches information of a certain firewall rule"""
return self.get(self.fwaas_firewall_rule_path % (firewall_rule),
params=_params)
def create_fwaas_firewall_rule(self, body=None):
"""Creates a new firewall rule"""
return self.post(self.fwaas_firewall_rules_path, body=body)
def update_fwaas_firewall_rule(self, firewall_rule, body=None):
"""Updates a firewall rule"""
return self.put(self.fwaas_firewall_rule_path % (firewall_rule),
body=body)
def delete_fwaas_firewall_rule(self, firewall_rule):
"""Deletes the specified firewall rule"""
return self.delete(self.fwaas_firewall_rule_path % (firewall_rule))
def list_fwaas_firewall_policies(self, retrieve_all=True, **_params):
"""Fetches a list of all firewall policies for a project"""
# Pass filters in "params" argument to do_request
return self.list('firewall_policies',
self.fwaas_firewall_policies_path,
retrieve_all, **_params)
def show_fwaas_firewall_policy(self, firewall_policy, **_params):
"""Fetches information of a certain firewall policy"""
return self.get(self.fwaas_firewall_policy_path % (firewall_policy),
params=_params)
def create_fwaas_firewall_policy(self, body=None):
"""Creates a new firewall policy"""
return self.post(self.fwaas_firewall_policies_path, body=body)
def update_fwaas_firewall_policy(self, firewall_policy, body=None):
"""Updates a firewall policy"""
return self.put(self.fwaas_firewall_policy_path % (firewall_policy),
body=body)
def delete_fwaas_firewall_policy(self, firewall_policy):
"""Deletes the specified firewall policy"""
return self.delete(self.fwaas_firewall_policy_path % (firewall_policy))
def insert_rule_fwaas_firewall_policy(self, firewall_policy, body=None):
"""Inserts specified rule into firewall policy"""
return self.put((self.fwaas_firewall_policy_insert_path %
(firewall_policy)), body=body)
def remove_rule_fwaas_firewall_policy(self, firewall_policy, body=None):
"""Removes specified rule from firewall policy"""
return self.put((self.fwaas_firewall_policy_remove_path %
(firewall_policy)), body=body)
def remove_router_from_l3_agent(self, l3_agent, router_id):
"""Remove a router from l3 agent."""
return self.delete((self.agent_path + self.L3_ROUTERS + "/%s") % (
l3_agent, router_id))
def get_lbaas_agent_hosting_pool(self, pool, **_params):
"""Fetches a loadbalancer agent hosting a pool."""
return self.get((self.pool_path + self.LOADBALANCER_AGENT) % pool,
params=_params)
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):
"""Fetches a list of pools hosted by the loadbalancer agent."""
return self.get((self.agent_path + self.LOADBALANCER_POOLS) %
lbaas_agent, params=_params)
def get_lbaas_agent_hosting_loadbalancer(self, loadbalancer, **_params):
"""Fetches a loadbalancer agent hosting a loadbalancer."""
return self.get((self.lbaas_loadbalancer_path +
self.LOADBALANCER_HOSTING_AGENT) % loadbalancer,
params=_params)
def list_loadbalancers_on_lbaas_agent(self, lbaas_agent, **_params):
"""Fetches a list of loadbalancers hosted by the loadbalancer agent."""
return self.get((self.agent_path + self.AGENT_LOADBALANCERS) %
lbaas_agent, params=_params)
def list_service_providers(self, retrieve_all=True, **_params):
"""Fetches service providers."""
# Pass filters in "params" argument to do_request
return self.list('service_providers', self.service_providers_path,
retrieve_all, **_params)
def create_metering_label(self, body=None):
"""Creates a metering label."""
return self.post(self.metering_labels_path, body=body)
def delete_metering_label(self, label):
"""Deletes the specified metering label."""
return self.delete(self.metering_label_path % (label))
def list_metering_labels(self, retrieve_all=True, **_params):
"""Fetches a list of all metering labels for a project."""
return self.list('metering_labels', self.metering_labels_path,
retrieve_all, **_params)
def show_metering_label(self, metering_label, **_params):
"""Fetches information of a certain metering label."""
return self.get(self.metering_label_path %
(metering_label), params=_params)
def create_metering_label_rule(self, body=None):
"""Creates a metering label rule."""
return self.post(self.metering_label_rules_path, body=body)
def delete_metering_label_rule(self, rule):
"""Deletes the specified metering label rule."""
return self.delete(self.metering_label_rule_path % (rule))
def list_metering_label_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all metering label rules for a label."""
return self.list('metering_label_rules',
self.metering_label_rules_path, retrieve_all,
**_params)
def show_metering_label_rule(self, metering_label_rule, **_params):
"""Fetches information of a certain metering label rule."""
return self.get(self.metering_label_rule_path %
(metering_label_rule), params=_params)
def create_rbac_policy(self, body=None):
"""Create a new RBAC policy."""
return self.post(self.rbac_policies_path, body=body)
def update_rbac_policy(self, rbac_policy_id, body=None):
"""Update a RBAC policy."""
return self.put(self.rbac_policy_path % rbac_policy_id, body=body)
def list_rbac_policies(self, retrieve_all=True, **_params):
"""Fetch a list of all RBAC policies for a project."""
return self.list('rbac_policies', self.rbac_policies_path,
retrieve_all, **_params)
def show_rbac_policy(self, rbac_policy_id, **_params):
"""Fetch information of a certain RBAC policy."""
return self.get(self.rbac_policy_path % rbac_policy_id,
params=_params)
def delete_rbac_policy(self, rbac_policy_id):
"""Delete the specified RBAC policy."""
return self.delete(self.rbac_policy_path % rbac_policy_id)
def list_qos_policies(self, retrieve_all=True, **_params):
"""Fetches a list of all qos policies for a project."""
# Pass filters in "params" argument to do_request
return self.list('policies', self.qos_policies_path,
retrieve_all, **_params)
def show_qos_policy(self, qos_policy, **_params):
"""Fetches information of a certain qos policy."""
return self.get(self.qos_policy_path % qos_policy,
params=_params)
def create_qos_policy(self, body=None):
"""Creates a new qos policy."""
return self.post(self.qos_policies_path, body=body)
def update_qos_policy(self, qos_policy, body=None, revision_number=None):
"""Updates a qos policy."""
return self._update_resource(self.qos_policy_path % qos_policy,
body=body,
revision_number=revision_number)
def delete_qos_policy(self, qos_policy):
"""Deletes the specified qos policy."""
return self.delete(self.qos_policy_path % qos_policy)
def list_qos_rule_types(self, retrieve_all=True, **_params):
"""List available qos rule types."""
return self.list('rule_types', self.qos_rule_types_path,
retrieve_all, **_params)
def list_bandwidth_limit_rules(self, policy_id,
retrieve_all=True, **_params):
"""Fetches a list of all bandwidth limit rules for the given policy."""
return self.list('bandwidth_limit_rules',
self.qos_bandwidth_limit_rules_path % policy_id,
retrieve_all, **_params)
def show_bandwidth_limit_rule(self, rule, policy, **_params):
"""Fetches information of a certain bandwidth limit rule."""
return self.get(self.qos_bandwidth_limit_rule_path %
(policy, rule), params=_params)
def create_bandwidth_limit_rule(self, policy, body=None):
"""Creates a new bandwidth limit rule."""
return self.post(self.qos_bandwidth_limit_rules_path % policy,
body=body)
def update_bandwidth_limit_rule(self, rule, policy, body=None):
"""Updates a bandwidth limit rule."""
return self.put(self.qos_bandwidth_limit_rule_path %
(policy, rule), body=body)
def delete_bandwidth_limit_rule(self, rule, policy):
"""Deletes a bandwidth limit rule."""
return self.delete(self.qos_bandwidth_limit_rule_path %
(policy, rule))
def list_dscp_marking_rules(self, policy_id,
retrieve_all=True, **_params):
"""Fetches a list of all DSCP marking rules for the given policy."""
return self.list('dscp_marking_rules',
self.qos_dscp_marking_rules_path % policy_id,
retrieve_all, **_params)
def show_dscp_marking_rule(self, rule, policy, **_params):
"""Shows information of a certain DSCP marking rule."""
return self.get(self.qos_dscp_marking_rule_path %
(policy, rule), params=_params)
def create_dscp_marking_rule(self, policy, body=None):
"""Creates a new DSCP marking rule."""
return self.post(self.qos_dscp_marking_rules_path % policy,
body=body)
def update_dscp_marking_rule(self, rule, policy, body=None):
"""Updates a DSCP marking rule."""
return self.put(self.qos_dscp_marking_rule_path %
(policy, rule), body=body)
def delete_dscp_marking_rule(self, rule, policy):
"""Deletes a DSCP marking rule."""
return self.delete(self.qos_dscp_marking_rule_path %
(policy, rule))
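    # --- Hedged usage sketch (editor's addition, not upstream code) ---
    # How the QoS helpers above compose; `neutron` is assumed to be an
    # authenticated instance of this Client (session setup is deployment
    # specific and omitted here):
    #
    #   policy = neutron.create_qos_policy(
    #       body={'policy': {'name': 'gold', 'shared': False}})
    #   policy_id = policy['policy']['id']
    #   rule = neutron.create_bandwidth_limit_rule(
    #       policy_id,
    #       body={'bandwidth_limit_rule': {'max_kbps': 10000,
    #                                      'max_burst_kbps': 1000}})
    #   neutron.delete_bandwidth_limit_rule(
    #       rule['bandwidth_limit_rule']['id'], policy_id)
    #   neutron.delete_qos_policy(policy_id)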
def list_minimum_bandwidth_rules(self, policy_id, retrieve_all=True,
**_params):
"""Fetches a list of all minimum bandwidth rules for the given policy.
"""
return self.list('minimum_bandwidth_rules',
self.qos_minimum_bandwidth_rules_path %
policy_id, retrieve_all, **_params)
def show_minimum_bandwidth_rule(self, rule, policy, body=None):
"""Fetches information of a certain minimum bandwidth rule."""
return self.get(self.qos_minimum_bandwidth_rule_path %
(policy, rule), body=body)
def create_minimum_bandwidth_rule(self, policy, body=None):
"""Creates a new minimum bandwidth rule."""
return self.post(self.qos_minimum_bandwidth_rules_path % policy,
body=body)
def list_packet_rate_limit_rules(self, policy_id, retrieve_all=True,
**_params):
"""Fetches a list of all packet rate limit rules for the given policy
"""
return self.list('packet_rate_limit_rules',
self.qos_packet_rate_limit_rules_path %
policy_id, retrieve_all, **_params)
def show_packet_rate_limit_rule(self, rule, policy, body=None):
"""Fetches information of a certain packet rate limit rule."""
return self.get(self.qos_packet_rate_limit_rule_path %
(policy, rule), body=body)
def create_packet_rate_limit_rule(self, policy, body=None):
"""Creates a new packet rate limit rule."""
return self.post(self.qos_packet_rate_limit_rules_path % policy,
body=body)
def update_packet_rate_limit_rule(self, rule, policy, body=None):
"""Updates a packet rate limit rule."""
return self.put(self.qos_packet_rate_limit_rule_path %
(policy, rule), body=body)
def delete_packet_rate_limit_rule(self, rule, policy):
"""Deletes a packet rate limit rule."""
return self.delete(self.qos_packet_rate_limit_rule_path %
(policy, rule))
def update_minimum_bandwidth_rule(self, rule, policy, body=None):
"""Updates a minimum bandwidth rule."""
return self.put(self.qos_minimum_bandwidth_rule_path %
(policy, rule), body=body)
def delete_minimum_bandwidth_rule(self, rule, policy):
"""Deletes a minimum bandwidth rule."""
return self.delete(self.qos_minimum_bandwidth_rule_path %
(policy, rule))
def list_minimum_packet_rate_rules(self, policy_id, retrieve_all=True,
**_params):
"""Fetches a list of all minimum packet rate rules for the given policy
"""
return self.list('minimum_packet_rate_rules',
self.qos_minimum_packet_rate_rules_path %
policy_id, retrieve_all, **_params)
def show_minimum_packet_rate_rule(self, rule, policy, body=None):
"""Fetches information of a certain minimum packet rate rule."""
return self.get(self.qos_minimum_packet_rate_rule_path %
(policy, rule), body=body)
def create_minimum_packet_rate_rule(self, policy, body=None):
"""Creates a new minimum packet rate rule."""
return self.post(self.qos_minimum_packet_rate_rules_path % policy,
body=body)
def update_minimum_packet_rate_rule(self, rule, policy, body=None):
"""Updates a minimum packet rate rule."""
return self.put(self.qos_minimum_packet_rate_rule_path %
(policy, rule), body=body)
def delete_minimum_packet_rate_rule(self, rule, policy):
"""Deletes a minimum packet rate rule."""
return self.delete(self.qos_minimum_packet_rate_rule_path %
(policy, rule))
def create_flavor(self, body=None):
"""Creates a new Neutron service flavor."""
return self.post(self.flavors_path, body=body)
def delete_flavor(self, flavor):
"""Deletes the specified Neutron service flavor."""
return self.delete(self.flavor_path % (flavor))
def list_flavors(self, retrieve_all=True, **_params):
"""Fetches a list of all Neutron service flavors for a project."""
return self.list('flavors', self.flavors_path, retrieve_all,
**_params)
def show_flavor(self, flavor, **_params):
"""Fetches information for a certain Neutron service flavor."""
return self.get(self.flavor_path % (flavor), params=_params)
def update_flavor(self, flavor, body):
"""Update a Neutron service flavor."""
return self.put(self.flavor_path % (flavor), body=body)
def associate_flavor(self, flavor, body):
"""Associate a Neutron service flavor with a profile."""
return self.post(self.flavor_profile_bindings_path %
(flavor), body=body)
def disassociate_flavor(self, flavor, flavor_profile):
"""Disassociate a Neutron service flavor with a profile."""
return self.delete(self.flavor_profile_binding_path %
(flavor, flavor_profile))
def create_service_profile(self, body=None):
"""Creates a new Neutron service flavor profile."""
return self.post(self.service_profiles_path, body=body)
def delete_service_profile(self, flavor_profile):
"""Deletes the specified Neutron service flavor profile."""
return self.delete(self.service_profile_path % (flavor_profile))
def list_service_profiles(self, retrieve_all=True, **_params):
"""Fetches a list of all Neutron service flavor profiles."""
return self.list('service_profiles', self.service_profiles_path,
retrieve_all, **_params)
def show_service_profile(self, flavor_profile, **_params):
"""Fetches information for a certain Neutron service flavor profile."""
return self.get(self.service_profile_path % (flavor_profile),
params=_params)
def update_service_profile(self, service_profile, body):
"""Update a Neutron service profile."""
return self.put(self.service_profile_path % (service_profile),
body=body)
def list_availability_zones(self, retrieve_all=True, **_params):
"""Fetches a list of all availability zones."""
return self.list('availability_zones', self.availability_zones_path,
retrieve_all, **_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def get_auto_allocated_topology(self, project_id, **_params):
"""Fetch information about a project's auto-allocated topology."""
return self.get(
self.auto_allocated_topology_path % project_id,
params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def delete_auto_allocated_topology(self, project_id, **_params):
"""Delete a project's auto-allocated topology."""
return self.delete(
self.auto_allocated_topology_path % project_id,
params=_params)
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
def validate_auto_allocated_topology_requirements(self, project_id):
"""Validate requirements for getting an auto-allocated topology."""
return self.get_auto_allocated_topology(project_id, fields=['dry-run'])
def list_bgp_speakers(self, retrieve_all=True, **_params):
"""Fetches a list of all BGP speakers for a project."""
return self.list('bgp_speakers', self.bgp_speakers_path, retrieve_all,
**_params)
def show_bgp_speaker(self, bgp_speaker_id, **_params):
"""Fetches information of a certain BGP speaker."""
return self.get(self.bgp_speaker_path % (bgp_speaker_id),
params=_params)
def create_bgp_speaker(self, body=None):
"""Creates a new BGP speaker."""
return self.post(self.bgp_speakers_path, body=body)
def update_bgp_speaker(self, bgp_speaker_id, body=None):
"""Update a BGP speaker."""
return self.put(self.bgp_speaker_path % bgp_speaker_id, body=body)
def delete_bgp_speaker(self, speaker_id):
"""Deletes the specified BGP speaker."""
return self.delete(self.bgp_speaker_path % (speaker_id))
def add_peer_to_bgp_speaker(self, speaker_id, body=None):
"""Adds a peer to BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/add_bgp_peer", body=body)
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
"""Removes a peer from BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/remove_bgp_peer", body=body)
def add_network_to_bgp_speaker(self, speaker_id, body=None):
"""Adds a network to BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/add_gateway_network", body=body)
def remove_network_from_bgp_speaker(self, speaker_id, body=None):
"""Removes a network from BGP speaker."""
return self.put((self.bgp_speaker_path % speaker_id) +
"/remove_gateway_network", body=body)
def list_route_advertised_from_bgp_speaker(self, speaker_id, **_params):
"""Fetches a list of all routes advertised by BGP speaker."""
return self.get((self.bgp_speaker_path % speaker_id) +
"/get_advertised_routes", params=_params)
def list_bgp_peers(self, **_params):
"""Fetches a list of all BGP peers."""
return self.get(self.bgp_peers_path, params=_params)
def show_bgp_peer(self, peer_id, **_params):
"""Fetches information of a certain BGP peer."""
return self.get(self.bgp_peer_path % peer_id,
params=_params)
def create_bgp_peer(self, body=None):
"""Create a new BGP peer."""
return self.post(self.bgp_peers_path, body=body)
def update_bgp_peer(self, bgp_peer_id, body=None):
"""Update a BGP peer."""
return self.put(self.bgp_peer_path % bgp_peer_id, body=body)
def delete_bgp_peer(self, peer_id):
"""Deletes the specified BGP peer."""
return self.delete(self.bgp_peer_path % peer_id)
def list_network_ip_availabilities(self, retrieve_all=True, **_params):
"""Fetches IP availability information for all networks"""
return self.list('network_ip_availabilities',
self.network_ip_availabilities_path,
retrieve_all, **_params)
def show_network_ip_availability(self, network, **_params):
"""Fetches IP availability information for a specified network"""
return self.get(self.network_ip_availability_path % (network),
params=_params)
def add_tag(self, resource_type, resource_id, tag, **_params):
"""Add a tag on the resource."""
return self.put(self.tag_path % (resource_type, resource_id, tag))
def replace_tag(self, resource_type, resource_id, body, **_params):
"""Replace tags on the resource."""
return self.put(self.tags_path % (resource_type, resource_id), body)
def remove_tag(self, resource_type, resource_id, tag, **_params):
"""Remove a tag on the resource."""
return self.delete(self.tag_path % (resource_type, resource_id, tag))
def remove_tag_all(self, resource_type, resource_id, **_params):
"""Remove all tags on the resource."""
return self.delete(self.tags_path % (resource_type, resource_id))
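    # --- Hedged usage sketch (editor's addition, not upstream code) ---
    # The tag helpers take the resource type in its URL (plural) form, e.g.:
    #
    #   neutron.add_tag('networks', net_id, 'production')
    #   neutron.replace_tag('networks', net_id, body={'tags': ['red', 'blue']})
    #   neutron.remove_tag('networks', net_id, 'red')
    #   neutron.remove_tag_all('networks', net_id)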
def create_trunk(self, body=None):
"""Create a trunk port."""
return self.post(self.trunks_path, body=body)
def update_trunk(self, trunk, body=None, revision_number=None):
"""Update a trunk port."""
return self._update_resource(self.trunk_path % trunk, body=body,
revision_number=revision_number)
def delete_trunk(self, trunk):
"""Delete a trunk port."""
return self.delete(self.trunk_path % (trunk))
def list_trunks(self, retrieve_all=True, **_params):
"""Fetch a list of all trunk ports."""
return self.list('trunks', self.trunks_path, retrieve_all,
**_params)
def show_trunk(self, trunk, **_params):
"""Fetch information for a certain trunk port."""
return self.get(self.trunk_path % (trunk), params=_params)
def trunk_add_subports(self, trunk, body=None):
"""Add specified subports to the trunk."""
return self.put(self.subports_add_path % (trunk), body=body)
def trunk_remove_subports(self, trunk, body=None):
"""Removes specified subports from the trunk."""
return self.put(self.subports_remove_path % (trunk), body=body)
def trunk_get_subports(self, trunk, **_params):
"""Fetch a list of all subports attached to given trunk."""
return self.get(self.subports_path % (trunk), params=_params)
def list_bgpvpns(self, retrieve_all=True, **_params):
"""Fetches a list of all BGP VPNs for a project"""
return self.list('bgpvpns', self.bgpvpns_path, retrieve_all, **_params)
def show_bgpvpn(self, bgpvpn, **_params):
"""Fetches information of a certain BGP VPN"""
return self.get(self.bgpvpn_path % bgpvpn, params=_params)
def create_bgpvpn(self, body=None):
"""Creates a new BGP VPN"""
return self.post(self.bgpvpns_path, body=body)
def update_bgpvpn(self, bgpvpn, body=None):
"""Updates a BGP VPN"""
return self.put(self.bgpvpn_path % bgpvpn, body=body)
def delete_bgpvpn(self, bgpvpn):
"""Deletes the specified BGP VPN"""
return self.delete(self.bgpvpn_path % bgpvpn)
def list_bgpvpn_network_assocs(self, bgpvpn, retrieve_all=True, **_params):
"""Fetches a list of network associations for a given BGP VPN."""
return self.list('network_associations',
self.bgpvpn_network_associations_path % bgpvpn,
retrieve_all, **_params)
def show_bgpvpn_network_assoc(self, bgpvpn, net_assoc, **_params):
"""Fetches information of a certain BGP VPN's network association"""
return self.get(
self.bgpvpn_network_association_path % (bgpvpn, net_assoc),
params=_params)
def create_bgpvpn_network_assoc(self, bgpvpn, body=None):
"""Creates a new BGP VPN network association"""
return self.post(self.bgpvpn_network_associations_path % bgpvpn,
body=body)
def update_bgpvpn_network_assoc(self, bgpvpn, net_assoc, body=None):
"""Updates a BGP VPN network association"""
return self.put(
self.bgpvpn_network_association_path % (bgpvpn, net_assoc),
body=body)
def delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):
"""Deletes the specified BGP VPN network association"""
return self.delete(
self.bgpvpn_network_association_path % (bgpvpn, net_assoc))
def list_bgpvpn_router_assocs(self, bgpvpn, retrieve_all=True, **_params):
"""Fetches a list of router associations for a given BGP VPN."""
return self.list('router_associations',
self.bgpvpn_router_associations_path % bgpvpn,
retrieve_all, **_params)
def show_bgpvpn_router_assoc(self, bgpvpn, router_assoc, **_params):
"""Fetches information of a certain BGP VPN's router association"""
return self.get(
self.bgpvpn_router_association_path % (bgpvpn, router_assoc),
params=_params)
def create_bgpvpn_router_assoc(self, bgpvpn, body=None):
"""Creates a new BGP VPN router association"""
return self.post(self.bgpvpn_router_associations_path % bgpvpn,
body=body)
def update_bgpvpn_router_assoc(self, bgpvpn, router_assoc, body=None):
"""Updates a BGP VPN router association"""
return self.put(
self.bgpvpn_router_association_path % (bgpvpn, router_assoc),
body=body)
def delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):
"""Deletes the specified BGP VPN router association"""
return self.delete(
self.bgpvpn_router_association_path % (bgpvpn, router_assoc))
def list_bgpvpn_port_assocs(self, bgpvpn, retrieve_all=True, **_params):
"""Fetches a list of port associations for a given BGP VPN."""
return self.list('port_associations',
self.bgpvpn_port_associations_path % bgpvpn,
retrieve_all, **_params)
def show_bgpvpn_port_assoc(self, bgpvpn, port_assoc, **_params):
"""Fetches information of a certain BGP VPN's port association"""
return self.get(
self.bgpvpn_port_association_path % (bgpvpn, port_assoc),
params=_params)
def create_bgpvpn_port_assoc(self, bgpvpn, body=None):
"""Creates a new BGP VPN port association"""
return self.post(self.bgpvpn_port_associations_path % bgpvpn,
body=body)
def update_bgpvpn_port_assoc(self, bgpvpn, port_assoc, body=None):
"""Updates a BGP VPN port association"""
return self.put(
self.bgpvpn_port_association_path % (bgpvpn, port_assoc),
body=body)
def delete_bgpvpn_port_assoc(self, bgpvpn, port_assoc):
"""Deletes the specified BGP VPN port association"""
return self.delete(
self.bgpvpn_port_association_path % (bgpvpn, port_assoc))
def create_sfc_port_pair(self, body=None):
"""Creates a new Port Pair."""
return self.post(self.sfc_port_pairs_path, body=body)
def update_sfc_port_pair(self, port_pair, body=None):
"""Update a Port Pair."""
return self.put(self.sfc_port_pair_path % port_pair, body=body)
def delete_sfc_port_pair(self, port_pair):
"""Deletes the specified Port Pair."""
return self.delete(self.sfc_port_pair_path % (port_pair))
def list_sfc_port_pairs(self, retrieve_all=True, **_params):
"""Fetches a list of all Port Pairs."""
return self.list('port_pairs', self.sfc_port_pairs_path, retrieve_all,
**_params)
def show_sfc_port_pair(self, port_pair, **_params):
"""Fetches information of a certain Port Pair."""
return self.get(self.sfc_port_pair_path % (port_pair), params=_params)
def create_sfc_port_pair_group(self, body=None):
"""Creates a new Port Pair Group."""
return self.post(self.sfc_port_pair_groups_path, body=body)
def update_sfc_port_pair_group(self, port_pair_group, body=None):
"""Update a Port Pair Group."""
return self.put(self.sfc_port_pair_group_path % port_pair_group,
body=body)
def delete_sfc_port_pair_group(self, port_pair_group):
"""Deletes the specified Port Pair Group."""
return self.delete(self.sfc_port_pair_group_path % (port_pair_group))
def list_sfc_port_pair_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all Port Pair Groups."""
return self.list('port_pair_groups', self.sfc_port_pair_groups_path,
retrieve_all, **_params)
def show_sfc_port_pair_group(self, port_pair_group, **_params):
"""Fetches information of a certain Port Pair Group."""
return self.get(self.sfc_port_pair_group_path % (port_pair_group),
params=_params)
def create_sfc_port_chain(self, body=None):
"""Creates a new Port Chain."""
return self.post(self.sfc_port_chains_path, body=body)
def update_sfc_port_chain(self, port_chain, body=None):
"""Update a Port Chain."""
return self.put(self.sfc_port_chain_path % port_chain, body=body)
def delete_sfc_port_chain(self, port_chain):
"""Deletes the specified Port Chain."""
return self.delete(self.sfc_port_chain_path % (port_chain))
def list_sfc_port_chains(self, retrieve_all=True, **_params):
"""Fetches a list of all Port Chains."""
return self.list('port_chains', self.sfc_port_chains_path,
retrieve_all, **_params)
def show_sfc_port_chain(self, port_chain, **_params):
"""Fetches information of a certain Port Chain."""
return self.get(self.sfc_port_chain_path % (port_chain),
params=_params)
def create_sfc_flow_classifier(self, body=None):
"""Creates a new Flow Classifier."""
return self.post(self.sfc_flow_classifiers_path, body=body)
def update_sfc_flow_classifier(self, flow_classifier, body=None):
"""Update a Flow Classifier."""
return self.put(self.sfc_flow_classifier_path % flow_classifier,
body=body)
def delete_sfc_flow_classifier(self, flow_classifier):
"""Deletes the specified Flow Classifier."""
return self.delete(self.sfc_flow_classifier_path % (flow_classifier))
def list_sfc_flow_classifiers(self, retrieve_all=True, **_params):
"""Fetches a list of all Flow Classifiers."""
return self.list('flow_classifiers', self.sfc_flow_classifiers_path,
retrieve_all, **_params)
def show_sfc_flow_classifier(self, flow_classifier, **_params):
"""Fetches information of a certain Flow Classifier."""
return self.get(self.sfc_flow_classifier_path % (flow_classifier),
params=_params)
def create_sfc_service_graph(self, body=None):
"""Create the specified Service Graph."""
return self.post(self.sfc_service_graphs_path, body=body)
def update_sfc_service_graph(self, service_graph, body=None):
"""Update a Service Graph."""
return self.put(self.sfc_service_graph_path % service_graph,
body=body)
def delete_sfc_service_graph(self, service_graph):
"""Deletes the specified Service Graph."""
return self.delete(self.sfc_service_graph_path % service_graph)
def list_sfc_service_graphs(self, retrieve_all=True, **_params):
"""Fetches a list of all Service Graphs."""
return self.list('service_graphs', self.sfc_service_graphs_path,
retrieve_all, **_params)
def show_sfc_service_graph(self, service_graph, **_params):
"""Fetches information of a certain Service Graph."""
return self.get(self.sfc_service_graph_path % service_graph,
params=_params)
def create_network_log(self, body=None):
"""Create a network log."""
return self.post(self.network_logs_path, body=body)
def delete_network_log(self, net_log):
"""Delete a network log."""
return self.delete(self.network_log_path % net_log)
def list_network_logs(self, retrieve_all=True, **_params):
"""Fetch a list of all network logs."""
return self.list(
'logs', self.network_logs_path, retrieve_all, **_params)
def show_network_log(self, net_log, **_params):
"""Fetch information for a certain network log."""
return self.get(self.network_log_path % net_log, params=_params)
def update_network_log(self, net_log, body=None):
"""Update a network log."""
return self.put(self.network_log_path % net_log, body=body)
def list_network_loggable_resources(self, retrieve_all=True, **_params):
"""Fetch a list of supported resource types for network log."""
return self.list('loggable_resources', self.network_loggables_path,
retrieve_all, **_params)
def onboard_network_subnets(self, subnetpool, body=None):
"""Onboard the specified network's subnets into a subnet pool."""
return self.put(self.onboard_network_subnets_path % (subnetpool),
body=body)
def __init__(self, **kwargs):
"""Initialize a new client for the Neutron v2.0 API."""
super(Client, self).__init__(**kwargs)
self._register_extensions(self.version)
def _update_resource(self, path, **kwargs):
revision_number = kwargs.pop('revision_number', None)
if revision_number:
headers = kwargs.setdefault('headers', {})
headers['If-Match'] = 'revision_number=%s' % revision_number
return self.put(path, **kwargs)
def extend_show(self, resource_singular, path, parent_resource):
def _fx(obj, **_params):
return self.show_ext(path, obj, **_params)
def _parent_fx(obj, parent_id, **_params):
return self.show_ext(path % parent_id, obj, **_params)
fn = _fx if not parent_resource else _parent_fx
setattr(self, "show_%s" % resource_singular, fn)
def extend_list(self, resource_plural, path, parent_resource):
def _fx(retrieve_all=True, **_params):
return self.list_ext(resource_plural, path,
retrieve_all, **_params)
def _parent_fx(parent_id, retrieve_all=True, **_params):
return self.list_ext(resource_plural, path % parent_id,
retrieve_all, **_params)
fn = _fx if not parent_resource else _parent_fx
setattr(self, "list_%s" % resource_plural, fn)
def extend_create(self, resource_singular, path, parent_resource):
def _fx(body=None):
return self.create_ext(path, body)
def _parent_fx(parent_id, body=None):
return self.create_ext(path % parent_id, body)
fn = _fx if not parent_resource else _parent_fx
setattr(self, "create_%s" % resource_singular, fn)
def extend_delete(self, resource_singular, path, parent_resource):
def _fx(obj):
return self.delete_ext(path, obj)
def _parent_fx(obj, parent_id):
return self.delete_ext(path % parent_id, obj)
fn = _fx if not parent_resource else _parent_fx
setattr(self, "delete_%s" % resource_singular, fn)
def extend_update(self, resource_singular, path, parent_resource):
def _fx(obj, body=None):
return self.update_ext(path, obj, body)
def _parent_fx(obj, parent_id, body=None):
return self.update_ext(path % parent_id, obj, body)
fn = _fx if not parent_resource else _parent_fx
setattr(self, "update_%s" % resource_singular, fn)
def _extend_client_with_module(self, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
parent_resource = getattr(cls, 'parent_resource', None)
if issubclass(cls, client_extension.ClientExtensionList):
self.extend_list(cls.resource_plural, cls.object_path,
parent_resource)
elif issubclass(cls, client_extension.ClientExtensionCreate):
self.extend_create(cls.resource, cls.object_path,
parent_resource)
elif issubclass(cls, client_extension.ClientExtensionUpdate):
self.extend_update(cls.resource, cls.resource_path,
parent_resource)
elif issubclass(cls, client_extension.ClientExtensionDelete):
self.extend_delete(cls.resource, cls.resource_path,
parent_resource)
elif issubclass(cls, client_extension.ClientExtensionShow):
self.extend_show(cls.resource, cls.resource_path,
parent_resource)
elif issubclass(cls, client_extension.NeutronClientExtension):
setattr(self, "%s_path" % cls.resource_plural,
cls.object_path)
setattr(self, "%s_path" % cls.resource, cls.resource_path)
self.EXTED_PLURALS.update({cls.resource_plural: cls.resource})
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_client_with_module(module, version)
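    # --- Hedged note (editor's addition, not upstream code) ---
    # Updates that accept revision_number are routed through
    # _update_resource(), which pins the PUT to a known revision with an
    # HTTP If-Match header, e.g.:
    #
    #   neutron.update_trunk(trunk_id,
    #                        body={'trunk': {'description': 'edge'}},
    #                        revision_number=7)
    #   # -> PUT .../trunks/{trunk_id} with "If-Match: revision_number=7";
    #   #    the server refuses the update if the trunk has moved past rev 7.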
| openstack/python-neutronclient | neutronclient/v2_0/client.py | Python | apache-2.0 | 115,185 |
"""
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def modify_kurfile(data):
for k in ('train', 'validate', 'test', 'evaluate'):
if k not in data:
continue
if 'weights' in data[k]:
del data[k]['weights']
if 'provider' not in data[k]:
data[k]['provider'] = {}
data[k]['provider']['num_batches'] = 1
data[k]['provider']['batch_size'] = 2
if 'train' in data:
if 'checkpoint' in data['train']:
del data['train']['checkpoint']
if 'stop_when' not in data['train']:
data['train']['stop_when'] = {}
data['train']['stop_when']['epochs'] = 2
if 'epochs' in data['train']:
del data['train']['epochs']
if 'log' in data['train']:
del data['train']['log']
if 'evaluate' in data:
if 'destination' in data['evaluate']:
del data['evaluate']['destination']
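# --- Hedged usage sketch (editor's addition, not in the upstream repo) ---
# modify_kurfile() mutates a parsed Kurfile dict in place so example suites
# run quickly: one batch of two samples per section, two training epochs,
# and no weights/checkpoint/log/destination side effects. The file name
# below is hypothetical; any Kurfile-shaped dict works:
#
#   import yaml
#   with open('speech.yml') as fh:
#       data = yaml.safe_load(fh)
#   modify_kurfile(data)
#   assert data['train']['stop_when']['epochs'] == 2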
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| deepgram/kur | tests/examples/modkurfile.py | Python | apache-2.0 | 1,380 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import textwrap
import pytest
from pants.backend.python.tasks.checkstyle.checker import PythonCheckStyleTask, PythonFile
from pants.backend.python.tasks.checkstyle.common import CheckstylePlugin
from pants.subsystem.subsystem import Subsystem
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class RageSubsystem(Subsystem):
options_scope = 'pycheck-pep8'
@classmethod
def register_options(cls, register):
    super(RageSubsystem, cls).register_options(register)
register('--skip', default=False, action='store_true',
help='If enabled, skip this style checker.')
class Rage(CheckstylePlugin):
"""Dummy Checkstyle plugin that hates everything"""
subsystem = RageSubsystem
def __init__(self, python_file):
self.python_file = python_file
def nits(self):
"""Return Nits for everything you see"""
for line_no, _ in self.python_file.enumerate():
yield self.error('T999', 'I hate everything!', line_no)
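# --- Hedged sketch (editor's addition) ---
# What the dummy checker produces: one T999 nit per enumerated line, e.g.
#
#   pf = PythonFile.from_statement("print('a')\nprint('b')")
#   [nit.code for nit in Rage(pf).nits()]  # -> ['T999', 'T999']
#
# where self.error(...) is inherited from CheckstylePlugin and builds a nit
# from a code, a message and a line number.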
@pytest.fixture()
def no_qa_line(request):
"""Py Test fixture to create a testing file for single line filters"""
request.cls.no_qa_line = PythonFile.from_statement(textwrap.dedent("""
print('This is not fine')
print('This is fine') # noqa
"""))
@pytest.fixture()
def no_qa_file(request):
"""Py Test fixture to create a testing file for whole file filters"""
request.cls.no_qa_file = PythonFile.from_statement(textwrap.dedent("""
# checkstyle: noqa
print('This is not fine')
print('This is fine')
"""))
@pytest.mark.usefixtures('no_qa_file', 'no_qa_line')
class TestPyStyleTask(PythonTaskTestBase):
@classmethod
def task_type(cls):
"""Required method"""
return PythonCheckStyleTask
def _create_context(self, target_roots=None):
return self.context(
options={
'py.check': {
'interpreter': 'python' # Interpreter required by PythonTaskTestBase
}
},
target_roots=target_roots)
def setUp(self):
"""Setup PythonCheckStyleTask with Rage Checker"""
super(TestPyStyleTask, self).setUp()
PythonCheckStyleTask.options_scope = 'py.check'
self.style_check = PythonCheckStyleTask(self._create_context(), ".")
self.style_check._plugins = [{'name': 'Troll', 'checker': Rage}]
self.style_check.options.suppress = None
def test_noqa_line_filter_length(self):
"""Verify the number of lines filtered is what we expect"""
nits = list(self.style_check.get_nits(self.no_qa_line))
assert len(nits) == 1, ('Actually got nits: {}'.format(
' '.join('{}:{}'.format(nit._line_number, nit) for nit in nits)
))
def test_noqa_line_filter_code(self):
"""Verify that the line we see has the correct code"""
nits = list(self.style_check.get_nits(self.no_qa_line))
assert nits[0].code == 'T999', 'Not handling the code correctly'
def test_noqa_file_filter(self):
"""Verify Whole file filters are applied correctly"""
nits = list(self.style_check.get_nits(self.no_qa_file))
assert len(nits) == 0, 'Expected zero nits since entire file should be ignored'
| areitz/pants | tests/python/pants_test/backend/python/tasks/checkstyle/test_noqa.py | Python | apache-2.0 | 3,397 |
'''
"network": [
{
"index": 0,
"ipv4_gateway": "",
"name": "",
"veth_pair": "",
"mtu": "",
"ipv6_gateway": "",
"flags": "up",
"ipv4": "",
"ipv6": "",
"hwaddr": "00:16:3e:1e:89:6b",
"link": "lxcbr0",
"script_up": "",
"script_down": "",
"type": "veth"
}
'''
import jsonlib
import json
import cpyutils.iputils
import random
class NetworkDefinition(jsonlib.Serializable):
_JSON_FIELDS_required = [ 'name', 'link', 'type' ]
_JSON_FIELDS_other = [ 'gateway' ]
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None, None, None), json_str)
if o is None:
raise Exception("could not create object from json '%s'" % json_str)
return o
def __init__(self, name, link, _type):
self._name = name
self.link = link
self.type = _type
self.gateway = None
self._last_lease = None
self._leases = []
    def _get_lease(self, lease):
        # register the lease only if neither its MAC nor its IP is already
        # taken; returns True on success, False on a clash
        if self._check_hwaddr_in_leases(lease.hwaddr): return False
        if self._check_ipv4_in_leases(lease.ipv4): return False
        self._leases.append(lease)
        return True
def _check_hwaddr_in_leases(self, hwaddr):
for lease in self._leases:
if lease.hwaddr == hwaddr: return True
return False
def _check_ipv4_in_leases(self, ipv4):
for lease in self._leases:
if lease.ipv4 == ipv4: return True
return False
def get_lease(self):
return None
def release_lease(self, lease):
for e_lease in self._leases:
if (lease.ipv4 == e_lease.ipv4) and (lease.hwaddr == e_lease.hwaddr):
self._leases.remove(e_lease)
return True
return False
class NetworkDefinition_MAC_Prefix(NetworkDefinition):
_JSON_FIELDS_default = { 'hwaddrprefix': '40:00:00' }
@staticmethod
def gen_hex_mac_prefix(original_mac):
mac = (original_mac.upper()).strip()
parts = mac.split(':')
if len(parts) > 6:
return None
if len(parts) > 1:
# let's think that it is a : separated mac
for p in parts:
if len(p) != 2:
return None
mac = ''.join(parts)
for c in mac:
if c not in '0123456789ABCDEF':
return None
return mac
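    # Hedged examples (editor's addition) of the normalisation above:
    #   gen_hex_mac_prefix('40:00:00')  -> '400000'
    #   gen_hex_mac_prefix('40:0:00')   -> None   (octet not 2 hex digits)
    #   gen_hex_mac_prefix('40-00-00')  -> None   (only ':' separators split)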
@classmethod
def from_json(cls, json_str, obj = None):
if obj is None: obj = cls(None, None, None)
o = jsonlib.Serializable.from_json(obj, json_str)
mac_prefix = cls.gen_hex_mac_prefix(o.hwaddrprefix)
if mac_prefix is None: raise Exception("Bad MAC mask format %s" % o.hwaddrprefix)
o._mac_prefix = int(mac_prefix, 16)
o._mac_tail = 0
o._mac_tail_bits = (12 - len(mac_prefix)) * 4
for i in range(0, 12 - len(mac_prefix)):
o._mac_prefix = (o._mac_prefix << 4)
o._mac_tail = (o._mac_tail << 4) | 0xf
return o
def _gen_mac(self):
new_mac = ("%x" % (self._mac_prefix | (random.getrandbits(self._mac_tail_bits) & self._mac_tail))).lower()
mac_str = ':'.join([new_mac[i:i+2] for i in range(0, len(new_mac), 2)])
return mac_str
def _gen_hw(self):
max_attempts = 10
mac = self._gen_mac()
while max_attempts > 0 and self._check_hwaddr_in_leases(mac):
mac = self._gen_mac()
max_attempts = max_attempts - 1
if max_attempts == 0:
return None
return mac
def get_lease(self):
mac = self._gen_hw()
if mac is None: return None
lease = NetworkConfiguration(self, hwaddr = mac)
if not self._get_lease(lease): return None
return lease
def _iphex_to_str(iphex):
ip = []
while iphex > 0:
v = iphex & 0xff
ip.append(str(v))
iphex = iphex >> 8
return '.'.join(ip[::-1])
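# Hedged sanity examples for _iphex_to_str (editor's addition):
#   _iphex_to_str(0xC0A80101) -> '192.168.1.1'
#   _iphex_to_str(0x0A000001) -> '10.0.0.1'
# Caveat: the loop stops once the remaining value is zero, so a leading zero
# octet is dropped (0x00FFFFFF -> '255.255.255'), which is fine for the
# masked network bases this module feeds it.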
class NetworkDefinition_IP_Range(NetworkDefinition_MAC_Prefix):
_JSON_FIELDS_default = { 'hwaddrprefix': '40:00:00', 'ipv4mask': '192.168.1.1/24' }
@classmethod
def from_json(cls, json_str):
b = NetworkDefinition_MAC_Prefix.from_json(json_str, NetworkDefinition_IP_Range(None, None, None))
if b is None:
return None
o = jsonlib.Serializable.from_json(b, json_str)
o._ipv4, o._mask = cpyutils.iputils.str_to_ipmask(o.ipv4mask)
return o
    def get_lease(self):
        mac = self._gen_hw()
        if mac is None: return None
        # walk the host part of the range until an IP is found that is not
        # leased yet; v is the host offset within the masked network
        v = 1
        ipv4 = self._ipv4 & self._mask
        max_range = 0xffffffff - self._mask
        newip = _iphex_to_str(ipv4 | (v & max_range))
        while (v < max_range) and self._check_ipv4_in_leases(newip):
            v = v + 1
            newip = _iphex_to_str(ipv4 | (v & max_range))
lease = NetworkConfiguration(self)
lease.ipv4 = newip
lease.hwaddr = mac
if not self._get_lease(lease): return None
return lease
class NetworkDefinition_Pair(NetworkDefinition):
_JSON_FIELDS_required = [ 'name', 'link', 'type' ]
_JSON_FIELDS_default = { 'iphw': [ { 'ipv4': '192.168.1.1', 'hwaddr': '40:00:00:00:00:01' } ] }
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None, None, None), json_str)
if o is not None:
for lease in o.iphw:
if not cpyutils.iputils.check_ip(lease['ipv4']): raise Exception("bad ip format: %s" % lease['ipv4'])
if not cpyutils.iputils.check_mac(lease['hwaddr']): raise Exception("bad hw address format: %s" % lease['hwaddr'])
else:
raise Exception("could not create object from json '%s'" % json_str)
return o
def get_lease(self):
lease = NetworkConfiguration(self)
for lease_info in self.iphw:
lease.ipv4, lease.hwaddr = (lease_info['ipv4'], lease_info['hwaddr'])
if self._get_lease(lease):
return lease
return None
class NetworkConfiguration(jsonlib.Serializable):
_JSON_FIELDS_required = [ 'link', 'hwaddr', 'type' ]
_JSON_FIELDS_default = { 'ipv4': None }
@classmethod
def from_json(cls, json_str):
o = jsonlib.Serializable.from_json(cls(None), json_str)
if o is not None:
if not cpyutils.iputils.check_mac(o.hwaddr): raise Exception("mac format is not valid")
return o
    def __init__(self, network_definition, ipv4 = None, hwaddr = None):
        self._network_definition = network_definition
        if network_definition is not None:
            self.link = network_definition.link
            self.type = network_definition.type
            self.gateway = network_definition.gateway
        else:
            # from_json() constructs with cls(None) and then fills in the
            # serialized fields, so tolerate a missing definition here
            self.link = None
            self.type = None
            self.gateway = None
        self.hwaddr = hwaddr
        self.ipv4 = ipv4
def lxc_config(self):
config = []
config.append("lxc.network.type = %s" % self.type)
config.append("lxc.network.link = %s" % self.link)
if self.hwaddr is not None: config.append("lxc.network.hwaddr = %s" % self.hwaddr)
if self.ipv4 is not None: config.append("lxc.network.ipv4 = %s" % self.ipv4)
if self.gateway is not None: config.append("lxc.network.ipv4.gateway = %s" % self.gateway)
config.append("lxc.network.flags = up")
return "\n".join(config)
if __name__ == "__main__":
n = json.dumps( {
'name': 'public_dhcp',
'link': 'br0',
'type': 'veth',
'gateway': '10.0.0.1',
'iphw': [
{ 'ipv4': '10.0.0.1', 'hwaddr': '60:00:00:00:00:01' },
{ 'ipv4': '10.0.0.2', 'hwaddr': '60:00:00:00:00:02' }
]
}
, indent = 4)
m = NetworkDefinition_MAC_Prefix.from_json(n)
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
#print m.get_lease()
p = NetworkDefinition_Pair.from_json(n)
l1 = p.get_lease()
l2 = p.get_lease()
l3 = p.get_lease()
print l1, l2, l3
i = NetworkDefinition_IP_Range.from_json(n)
print i.get_lease().lxc_config()
print i.get_lease().lxc_config()
print i.get_lease().lxc_config()
print i.get_lease().lxc_config()
'''
d = NetworkDefinition.from_json(
'{\
"name": "basic", \
"link": "br0", \
"type": "veth", \
"hwaddr": "40:00:00:00:00:01"\
}')
print d
'''
# print o
# print json.dumps(o.serialize(), indent=4)
| dealfonso/lxc-pm | networkmanager.py | Python | apache-2.0 | 8,801 |
# This Python module is part of the PyRate software package.
#
# Copyright 2022 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains regression tests for comparing output from serial,
parallel and MPI PyRate runs.
"""
import shutil
import pytest
from pathlib import Path
from subprocess import check_call, CalledProcessError, run
import numpy as np
import pyrate.constants as C
from pyrate.configuration import Configuration, write_config_file
from tests.common import (
assert_same_files_produced,
assert_two_dirs_equal,
manipulate_test_conf,
MEXICO_CROPA_CONF,
TEST_CONF_GAMMA,
PY37GDAL304,
PY37GDAL302,
PYTHON3P8,
PYTHON3P7,
WORKING_DIR
)
@pytest.fixture(params=[0, 1])
def parallel(request):
return request.param
@pytest.fixture(params=[1, 4])
def local_crop(request):
return request.param
@pytest.fixture()
def modified_config(tempdir, get_lks, get_crop, orbfit_lks, orbfit_method, orbfit_degrees, ref_est_method):
def modify_params(conf_file, parallel_vs_serial, output_conf_file):
tdir = Path(tempdir())
params = manipulate_test_conf(conf_file, tdir)
if params[C.PROCESSOR] == 1: # turn on coherence for gamma
params[C.COH_MASK] = 1
params[C.PARALLEL] = parallel_vs_serial
params[C.PROCESSES] = 4
params[C.APSEST] = 1
params[C.IFG_LKSX], params[C.IFG_LKSY] = get_lks, get_lks
params[C.REFNX], params[C.REFNY] = 2, 2
params[C.IFG_CROP_OPT] = get_crop
params[C.ORBITAL_FIT_LOOKS_X], params[C.ORBITAL_FIT_LOOKS_Y] = orbfit_lks, orbfit_lks
params[C.ORBITAL_FIT] = 1
params[C.ORBITAL_FIT_METHOD] = orbfit_method
params[C.ORBITAL_FIT_DEGREE] = orbfit_degrees
params[C.REF_EST_METHOD] = ref_est_method
params[C.MAX_LOOP_LENGTH] = 3
params[C.LR_MAXSIG] = 0 # turn off pixel masking for these tests
params["rows"], params["cols"] = 3, 2
params["savenpy"] = 1
params["notiles"] = params["rows"] * params["cols"] # number of tiles
print(params)
# write new temp config
output_conf = tdir.joinpath(output_conf_file)
write_config_file(params=params, output_conf_file=output_conf)
return output_conf, params
return modify_params
@pytest.mark.mpi
@pytest.mark.slow
@pytest.mark.skipif(not PYTHON3P8, reason="Only run in one CI env")
def test_pipeline_parallel_vs_mpi(modified_config, gamma_or_mexicoa_conf=TEST_CONF_GAMMA):
"""
Tests proving single/multiprocess/mpi produce same output
"""
gamma_conf = gamma_or_mexicoa_conf
    if np.random.rand() > 0.1:  # skip ~90% of runs at random to keep CI time down
        pytest.skip("Randomly skipping as part of the 90 percent sample")
    if gamma_conf == MEXICO_CROPA_CONF:  # skip the cropA conf 50% of the time
if np.random.rand() > 0.5:
pytest.skip('skipped in mexicoA')
print("\n\n")
print("===x==="*10)
mpi_conf, params = modified_config(gamma_conf, 0, 'mpi_conf.conf')
run(f"mpirun -n 3 pyrate conv2tif -f {mpi_conf}", shell=True, check=True)
run(f"mpirun -n 3 pyrate prepifg -f {mpi_conf}", shell=True, check=True)
try:
run(f"mpirun -n 3 pyrate correct -f {mpi_conf}", shell=True, check=True)
run(f"mpirun -n 3 pyrate timeseries -f {mpi_conf}", shell=True, check=True)
run(f"mpirun -n 3 pyrate stack -f {mpi_conf}", shell=True, check=True)
run(f"mpirun -n 3 pyrate merge -f {mpi_conf}", shell=True, check=True)
except CalledProcessError as e:
print(e)
pytest.skip("Skipping as part of correction error")
mr_conf, params_m = modified_config(gamma_conf, 1, 'multiprocess_conf.conf')
run(f"pyrate workflow -f {mr_conf}", shell=True, check=True)
sr_conf, params_s = modified_config(gamma_conf, 0, 'singleprocess_conf.conf')
run(f"pyrate workflow -f {sr_conf}", shell=True, check=True)
# convert2tif tests, 17 interferograms
if not gamma_conf == MEXICO_CROPA_CONF:
assert_same_files_produced(params[C.INTERFEROGRAM_DIR],
params_m[C.INTERFEROGRAM_DIR], params_s[C.INTERFEROGRAM_DIR], "*_unw.tif", 17)
# dem
assert_same_files_produced(params[C.GEOMETRY_DIR],
params_m[C.GEOMETRY_DIR], params_s[C.GEOMETRY_DIR], "*_dem.tif", 1)
    # if coherence masking, compare the coh files that were converted
if params[C.COH_FILE_LIST] is not None:
assert_same_files_produced(params[C.COHERENCE_DIR], params_m[C.COHERENCE_DIR], params_s[C.COHERENCE_DIR],
"*_cc.tif", 17)
print("coherence files compared")
# prepifg checks
num_of_ifgs = 30 if gamma_conf == MEXICO_CROPA_CONF else 17
num_of_coh = 30 if gamma_conf == MEXICO_CROPA_CONF else 17
# check geom files
if params[C.DEMERROR]:
# check files required by dem error correction are produced
assert_same_files_produced(
params[C.GEOMETRY_DIR], params_m[C.GEOMETRY_DIR], params_s[C.GEOMETRY_DIR],
[ft + '.tif' for ft in C.GEOMETRY_OUTPUT_TYPES] + ['*dem.tif'],
7 if gamma_conf == MEXICO_CROPA_CONF else 8
)
# ifgs
assert_same_files_produced(params[C.INTERFEROGRAM_DIR], params_m[C.INTERFEROGRAM_DIR],
params_s[C.INTERFEROGRAM_DIR], ["*_ifg.tif"], num_of_ifgs)
# coherence
assert_same_files_produced(params[C.COHERENCE_DIR], params_m[C.COHERENCE_DIR],
params_s[C.COHERENCE_DIR], ["*_coh.tif"], num_of_coh)
# coherence stats
assert_same_files_produced(params[C.COHERENCE_DIR], params_m[C.COHERENCE_DIR],
params_s[C.COHERENCE_DIR], ["coh_*.tif"], 3)
num_files = 30 if gamma_conf == MEXICO_CROPA_CONF else 17
# cf.TEMP_MLOOKED_DIR will contain the temp files that can be potentially deleted later
assert_same_files_produced(params[C.TEMP_MLOOKED_DIR], params_m[C.TEMP_MLOOKED_DIR],
params_s[C.TEMP_MLOOKED_DIR], "*_ifg.tif", num_files)
# prepifg + correct steps that overwrite tifs test
# ifg phase checking in the previous step checks the correct pipeline upto APS correction
# 2 x because of aps files
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "tsincr_*.npy", params['notiles'] * 2)
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "tscuml_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "linear_rate_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "linear_error_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "linear_intercept_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "linear_rsquared_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "linear_samples_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "stack_rate_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "stack_error_*.npy", params['notiles'])
assert_same_files_produced(params[C.TMPDIR], params_m[C.TMPDIR], params_s[
C.TMPDIR], "stack_samples_*.npy", params['notiles'])
# compare merge step
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "stack*.tif", 3)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "stack*.kml", 2)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "stack*.png", 2)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "stack*.npy", 3)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "linear_*.tif", 5)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "linear_*.kml", 3)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "linear_*.png", 3)
assert_same_files_produced(params[C.VELOCITY_DIR], params_m[C.VELOCITY_DIR], params_s[
C.VELOCITY_DIR], "linear_*.npy", 5)
if params[C.PHASE_CLOSURE]: # only in cropA
__check_equality_of_phase_closure_outputs(mpi_conf, sr_conf)
__check_equality_of_phase_closure_outputs(mpi_conf, mr_conf)
assert_same_files_produced(params[C.TIMESERIES_DIR], params_m[C.TIMESERIES_DIR], params_s[
C.TIMESERIES_DIR], "tscuml*.tif", 11) # phase closure removes one tif
assert_same_files_produced(params[C.TIMESERIES_DIR], params_m[C.TIMESERIES_DIR], params_s[
C.TIMESERIES_DIR], "tsincr*.tif", 11)
else:
assert_same_files_produced(params[C.TIMESERIES_DIR], params_m[C.TIMESERIES_DIR], params_s[
C.TIMESERIES_DIR], "tscuml*.tif", 12)
assert_same_files_produced(params[C.TIMESERIES_DIR], params_m[C.TIMESERIES_DIR], params_s[
C.TIMESERIES_DIR], "tsincr*.tif", 12)
print("==========================xxx===========================")
shutil.rmtree(params[WORKING_DIR])
shutil.rmtree(params_m[WORKING_DIR])
shutil.rmtree(params_s[WORKING_DIR])
def __check_equality_of_phase_closure_outputs(mpi_conf, sr_conf):
m_config = Configuration(mpi_conf)
s_config = Configuration(sr_conf)
m_close = m_config.closure()
s_close = s_config.closure()
m_closure = np.load(m_close.closure)
s_closure = np.load(s_close.closure)
# loops
m_loops = np.load(m_close.loops, allow_pickle=True)
s_loops = np.load(s_close.loops, allow_pickle=True)
m_weights = [m.weight for m in m_loops]
s_weights = [m.weight for m in s_loops]
np.testing.assert_array_equal(m_weights, s_weights)
for i, (m, s) in enumerate(zip(m_loops, s_loops)):
assert all(m_e == s_e for m_e, s_e in zip(m.edges, s.edges))
# closure
np.testing.assert_array_almost_equal(np.abs(m_closure), np.abs(s_closure), decimal=4)
# num_occurrences_each_ifg
m_num_occurences_each_ifg = np.load(m_close.num_occurences_each_ifg, allow_pickle=True)
s_num_occurences_each_ifg = np.load(s_close.num_occurences_each_ifg, allow_pickle=True)
np.testing.assert_array_equal(m_num_occurences_each_ifg, s_num_occurences_each_ifg)
# check ps
m_ifgs_breach_count = np.load(m_close.ifgs_breach_count)
s_ifgs_breach_count = np.load(s_close.ifgs_breach_count)
np.testing.assert_array_equal(m_ifgs_breach_count, s_ifgs_breach_count)
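# --- Hedged note (editor's addition) ---
# These regressions drive the real CLI, so any single leg can be reproduced
# by hand against a generated config and the outputs diffed afterwards:
#
#   mpirun -n 3 pyrate conv2tif -f mpi_conf.conf   # then prepifg, correct, ...
#   pyrate workflow -f singleprocess_conf.conf
#   # then compare product dirs with assert_two_dirs_equal(...)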
@pytest.fixture(params=[0, 1])
def coh_mask(request):
return request.param
@pytest.fixture()
def modified_config_short(tempdir, local_crop, get_lks, coh_mask):
orbfit_lks = 1
orbfit_method = 1
orbfit_degrees = 1
ref_est_method = 1
ref_pixel = (150.941666654, -34.218333314)
def modify_params(conf_file, parallel, output_conf_file, largetifs):
tdir = Path(tempdir())
params = manipulate_test_conf(conf_file, tdir)
params[C.COH_MASK] = coh_mask
params[C.PARALLEL] = parallel
params[C.PROCESSES] = 4
params[C.APSEST] = 1
params[C.LARGE_TIFS] = largetifs
params[C.IFG_LKSX], params[C.IFG_LKSY] = get_lks, get_lks
params[C.REFX], params[C.REFY] = ref_pixel
params[C.REFNX], params[C.REFNY] = 4, 4
params[C.IFG_CROP_OPT] = local_crop
params[C.ORBITAL_FIT_LOOKS_X], params[
C.ORBITAL_FIT_LOOKS_Y] = orbfit_lks, orbfit_lks
params[C.ORBITAL_FIT] = 1
params[C.ORBITAL_FIT_METHOD] = orbfit_method
params[C.ORBITAL_FIT_DEGREE] = orbfit_degrees
params[C.REF_EST_METHOD] = ref_est_method
params["rows"], params["cols"] = 3, 2
params["savenpy"] = 1
params["notiles"] = params["rows"] * params["cols"] # number of tiles
# print(params)
# write new temp config
output_conf = tdir.joinpath(output_conf_file)
write_config_file(params=params, output_conf_file=output_conf)
return output_conf, params
return modify_params
@pytest.fixture
def create_mpi_files():
def _create(modified_config_short, gamma_conf):
mpi_conf, params = modified_config_short(gamma_conf, 0, 'mpi_conf.conf', 1)
check_call(f"mpirun -n 3 pyrate conv2tif -f {mpi_conf}", shell=True)
check_call(f"mpirun -n 3 pyrate prepifg -f {mpi_conf}", shell=True)
try:
check_call(f"mpirun -n 3 pyrate correct -f {mpi_conf}", shell=True)
check_call(f"mpirun -n 3 pyrate timeseries -f {mpi_conf}", shell=True)
check_call(f"mpirun -n 3 pyrate stack -f {mpi_conf}", shell=True)
except CalledProcessError as c:
print(c)
pytest.skip("Skipping as we encountered a process error during CI")
check_call(f"mpirun -n 3 pyrate merge -f {mpi_conf}", shell=True)
return params
return _create
@pytest.mark.mpi
@pytest.mark.slow
@pytest.mark.skipif(not PY37GDAL304, reason="Only run in one CI env")
def test_stack_and_ts_mpi_vs_parallel_vs_serial(modified_config_short, gamma_conf, create_mpi_files, parallel):
"""
Checks performed:
1. mpi vs single process pipeline
2. mpi vs parallel (python multiprocess) pipeline.
3. Doing 1 and 2 means we have checked single vs parallel python multiprocess pipelines
4. This also checks the entire pipeline using largetifs (new prepifg) vs old perpifg (python based)
"""
    if np.random.randint(0, 1000) > 300:  # skip ~70% of runs at random
        pytest.skip("Randomly skipping as part of the 70 percent sample")
print("\n\n")
print("===x==="*10)
params = create_mpi_files(modified_config_short, gamma_conf)
sr_conf, params_p = modified_config_short(gamma_conf, parallel, 'parallel_conf.conf', 0)
check_call(f"pyrate workflow -f {sr_conf}", shell=True)
# convert2tif tests, 17 interferograms
assert_two_dirs_equal(params[C.INTERFEROGRAM_DIR], params_p[C.INTERFEROGRAM_DIR], "*_unw.tif", 17)
    # if coherence masking, compare the coh files that were converted
if params[C.COH_FILE_LIST] is not None:
assert_two_dirs_equal(params[C.COHERENCE_DIR], params_p[C.COHERENCE_DIR], "*_cc.tif", 17)
print("coherence files compared")
assert_two_dirs_equal(params[C.INTERFEROGRAM_DIR], params_p[C.INTERFEROGRAM_DIR], ["*_ifg.tif"], 17)
# one original dem, another multilooked dem
assert_two_dirs_equal(params[C.GEOMETRY_DIR], params_p[C.GEOMETRY_DIR], ['*dem.tif'], 2)
assert_two_dirs_equal(params[C.GEOMETRY_DIR], params_p[C.GEOMETRY_DIR],
[t + "*.tif" for t in C.GEOMETRY_OUTPUT_TYPES], 6) # 2 dems, 6 geom
assert_two_dirs_equal(params[C.TEMP_MLOOKED_DIR], params_p[C.TEMP_MLOOKED_DIR], "*_ifg.tif", 17)
# ifg phase checking in the previous step checks the correct pipeline upto APS correction
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "tsincr_*.npy", params['notiles'] * 2)
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "tscuml_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "linear_rate_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "linear_error_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "linear_samples_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "linear_intercept_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "linear_rsquared_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "stack_rate_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "stack_error_*.npy", params['notiles'])
assert_two_dirs_equal(params[C.TMPDIR], params_p[C.TMPDIR], "stack_samples_*.npy", params['notiles'])
# compare merge step
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "stack*.tif", 3)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "stack*.kml", 2)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "stack*.png", 2)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "stack*.npy", 3)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "linear*.tif", 5)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "linear*.kml", 3)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "linear*.png", 3)
assert_two_dirs_equal(params[C.VELOCITY_DIR], params_p[C.VELOCITY_DIR], "linear*.npy", 5)
assert_two_dirs_equal(params[C.TIMESERIES_DIR], params_p[C.TIMESERIES_DIR], "tscuml*.tif")
assert_two_dirs_equal(params[C.TIMESERIES_DIR], params_p[C.TIMESERIES_DIR], "tsincr*.tif")
assert_two_dirs_equal(params[C.TIMESERIES_DIR], params_p[C.TIMESERIES_DIR], "tscuml*.npy")
assert_two_dirs_equal(params[C.TIMESERIES_DIR], params_p[C.TIMESERIES_DIR], "tsincr*.npy")
print("==========================xxx===========================")
shutil.rmtree(params[WORKING_DIR])
shutil.rmtree(params_p[WORKING_DIR])
| GeoscienceAustralia/PyRate | tests/test_mpi_vs_multiprocess_vs_single_process.py | Python | apache-2.0 | 18,387 |
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from touchdown.core import adapters, argument, errors, plan, serializers, workspace
try:
from . import client
from paramiko import ssh_exception
except ImportError:
client = None
from touchdown.provisioner import Target
class Instance(adapters.Adapter):
pass
class Connection(Target):
resource_name = "ssh_connection"
username = argument.String(default="root", field="username")
password = argument.String(field="password")
private_key = argument.String(field="pkey", serializer=serializers.Identity())
hostname = argument.String(field="hostname")
instance = argument.Resource(Instance, field="hostname", serializer=serializers.Resource())
port = argument.Integer(field="port", default=22)
proxy = argument.Resource("touchdown.ssh.connection.Connection")
root = argument.Resource(workspace.Workspace)
    def clean_private_key(self, private_key):
        if private_key and client:
            return client.private_key_from_string(private_key)
        if private_key:
            # a key was supplied but paramiko is unavailable to parse it
            raise errors.Error("Paramiko library is required to use an SSH private key")
        raise errors.InvalidParameter("Invalid SSH private key")
class ConnectionPlan(plan.Plan):
name = "describe"
resource = Connection
_client = None
def get_client(self):
if self._client:
return self._client
cli = client.Client(self)
kwargs = serializers.Resource().render(self.runner, self.resource)
if self.resource.proxy:
self.echo("Setting up connection proxy via {}".format(self.resource.proxy))
proxy = self.runner.get_plan(self.resource.proxy)
transport = proxy.get_client().get_transport()
self.echo("Setting up proxy channel to {}".format(kwargs['hostname']))
for i in range(20):
try:
kwargs['sock'] = transport.open_channel(
'direct-tcpip',
(kwargs['hostname'], kwargs['port']),
('', 0)
)
break
except ssh_exception.ChannelException:
time.sleep(i)
continue
if 'sock' not in kwargs:
raise errors.Error("Error setting up proxy channel to {} after 20 tries".format(kwargs['hostname']))
self.echo("Proxy setup")
if not self.resource.password and not self.resource.private_key:
kwargs['look_for_keys'] = True
kwargs['allow_agent'] = True
args = ["hostname={}".format(kwargs['hostname']), "username={}".format(kwargs['username'])]
if self.resource.port != 22:
args.append("port={}".format(kwargs['port']))
self.echo("Establishing ssh connection ({})".format(", ".join(args)))
cli.connect(**kwargs)
self.echo("Got connection")
self._client = cli
return cli
def get_actions(self):
if not client:
raise errors.Error("Paramiko library is required to perform operations involving ssh")
return []
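# Hedged sketch of the 'direct-tcpip' hop that get_client() performs, shown
# standalone with plain paramiko (host names and credentials are made up):
#
#   import paramiko
#   bastion = paramiko.SSHClient()
#   bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#   bastion.connect('bastion.example.com', username='ops')
#   sock = bastion.get_transport().open_channel(
#       'direct-tcpip', ('10.0.0.5', 22), ('', 0))
#   inner = paramiko.SSHClient()
#   inner.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#   inner.connect('10.0.0.5', username='ops', sock=sock)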
| mitchellrj/touchdown | touchdown/ssh/connection.py | Python | apache-2.0 | 3,610 |
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
class VOptionGroup(object):
HTML_CONTENT_ALLOWED = "usehtml"
| rwl/muntjac | muntjac/terminal/gwt/client/ui/v_option_group.py | Python | apache-2.0 | 110 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.io.sensors.rotary_encoder import base_rotary_encoder
from calvin.runtime.south.plugins.async import async
from calvin.runtime.south.plugins.io.gpio import gpiopin
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class RotaryEncoder(base_rotary_encoder.RotaryEncoderBase):
"""
KY040 Rotary Encoder
"""
def __init__(self, node, turn_callback, switch_callback):
super(RotaryEncoder, self).__init__(node, turn_callback, switch_callback)
self._running = False
self._node = node
self._turn_callback = turn_callback
self._switch_callback = switch_callback
config = self._node.attributes.get_private("/hardware/ky040_rotary_encoder")
clk_pin = config.get('clk_pin', None)
dt_pin = config.get('dt_pin', None)
sw_pin = config.get('sw_pin', None)
self._clk_pin = gpiopin.GPIOPin(self._knob, clk_pin, "i", "u")
self._dt_pin = gpiopin.GPIOPin(None, dt_pin, "i", None)
self._sw_pin = gpiopin.GPIOPin(self._switch, sw_pin, "i", "u")
def start(self, frequency=0.5):
try :
self._clk_pin.detect_edge("f")
self._sw_pin.detect_edge("f")
self._running = True
# gpio.add_event_detect(self.echo_pin,
# gpio.FALLING,
# callback=self._echo_callback)
except Exception as e:
_log.error("Could not setup event detect: %r" % (e, ))
def cb_error(self, *args, **kwargs):
_log.error("%r: %r" % (args, kwargs))
def _knob(self):
if self._clk_pin.get_state():
if self._dt_pin.get_state() :
async.call_from_thread(self._turn_callback, -1)
else :
async.call_from_thread(self._turn_callback, 1)
def _switch(self):
async.call_from_thread(self._switch_callback)
def stop(self):
if self._running :
            # No retry timer is ever armed in this class, so guard the lookup;
            # active() is the assumed timer API here.
            retry = getattr(self, 'retry', None)
            if retry and retry.active():
                retry.cancel()
try:
self._sw_pin.stop_detect()
self._dt_pin.stop_detect()
self._clk_pin.stop_detect()
except Exception as e:
_log.warning("Could not remove event detect: %r" % (e,))
self._running = False
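# Hedged note: the private node attribute read in __init__ is expected to look
# roughly like this (GPIO pin numbers are made up for illustration):
#   /hardware/ky040_rotary_encoder -> {"clk_pin": 17, "dt_pin": 18, "sw_pin": 27}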
| les69/calvin-base | calvin/runtime/south/plugins/io/sensors/rotary_encoder/platform/ky040_rotary_impl/rotary_encoder.py | Python | apache-2.0 | 3,011 |
#!/usr/bin/env python
import sys
# let's parse strings in python!
options = []
with open("src/zopt.ggo.in") as fd:
for l in fd:
if l.startswith("option "):
option = l.split()[1].lstrip('"').rstrip('"')
options.append(option)
man = open('src/zmap.1.ronn').read()
failures = False
for option in options:
if option not in man:
failures = True
sys.stderr.write("ZMap option missing from man file: %s\n" % option)
if failures:
sys.exit(1)
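# Hedged illustration of the option parsing above on a made-up gengetopt line
# (the sample string is illustrative, not taken from zopt.ggo.in):
#   >>> 'option "target-port" p "port to scan" int'.split()[1].lstrip('"').rstrip('"')
#   'target-port'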
| willscott/zmap | scripts/check_manfile.py | Python | apache-2.0 | 493 |
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
from pyquery.pyquery import PyQuery #@UnresolvedImport
import re
import datetime #@UnusedImport
import urllib2
from lxml import etree #@UnresolvedImport
from lxml.cssselect import CSSSelector #@UnresolvedImport
import simplejson as js #@UnusedImport @UnresolvedImport
from config import housetype, checkPath, makePath,fitment,toward,deposit
import threading
from BeautifulSoup import BeautifulSoup #@UnresolvedImport
import time
import gc
from jjrlog import msglogger, LinkLog
from common import postHost
homepath="e:\\home\\spider\\"
gc.enable()
class LinkCrawl(object):
def __init__(self,citycode="",kind="",upc="5",st="3"):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.upc=upc
self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
self.clinks=[]
self.pn=[]
self.citycode=citycode
self.baseUrl="http://%s.ganji.com"%self.citycode
self.kind=kind
if kind=="1":#出售
self.urlpath="/fang5/a1u2%s/"
self.folder="sell\\"
elif kind=="2":#出租
self.urlpath="/fang1/u2%s/"
self.folder="rent\\"
elif kind=="3":#求购
self.urlpath="/fang4/u2f0/a1%s/"
self.folder="buy\\"
elif kind=="4":#求租
self.urlpath="/fang2/u2f0/a1%s/"
self.folder="req\\"
def __getAllNeedLinks(self):
cond=True
idx=0
checkit="0"
while cond:
url=self.baseUrl+self.urlpath%("f"+str(idx*32))
#url="http://gz.ganji.com/fang2/u2f0/a1f768/"
# print url
try:
req=urllib2.Request(url, None, self.header)
p=self.br.open(req).read()
except:
continue
else:
check=PyQuery(p)("ul.pageLink li a.c").text()
if check==None or check==checkit:
cond=False
break
else:
checkit=check
links=PyQuery(p)("div.list dl")
p=None
# print len(links)
for link in links:
lk=self.baseUrl+PyQuery(link)(" a.list_title").attr("href")
# print lk
if self.kind=="3" or self.kind=="4":
tm=PyQuery(link)("dd span.time").text()
if re.match('''\d{2}-\d{2}''', tm):
Y=int(time.strftime('%Y', time.localtime()))
tm="%s-%s"%(Y,tm.strip())
if tm<self.endtime:
cond=False
break
elif "分钟" in tm:
pass
elif "小时" in tm:
pass
else:
cond=False
break
if not checkPath(homepath,self.folder,lk):
LinkLog.info("%s|%s"%(self.kind,lk))
try:
getContent(lk,self.citycode,self.kind,self.upc)
except Exception,e:print "ganji getContent Exception %s"%e
# fetch_quere.put({"mod":"ganji","link":lk,"citycode":self.citycode,"kind":self.kind})
# if lk not in self.clinks:
# self.clinks.append(lk)
idx=idx+1
# print len(self.clinks)
def runme(self):
#self.__initPageNum()
self.__getAllNeedLinks()
class ContentCrawl(object):
def __init__(self,links,citycode,kind,upc):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.pdb={}
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.urls=links
self.kind=kind
self.fd={}
self.citycode=citycode
self.upc=upc
if kind=="1":
self.folder="sell\\"
elif kind=="2":
self.folder="rent\\"
elif kind=="3":
self.folder="buy\\"
else:
self.folder="req\\"
        # regexes for JS-embedded fields
self.xiaoqu_regex="xiaoqu : '(.*?)',"
self.address_regex="address : '(.*?)',"
self.house_room_regex="(\d+)室"
self.house_hall_regex="(\d+)厅"
self.house_toilet_regex="(\d+)卫"
self.house_desc_regex="房屋概况</p>(.*?)</p>"
self.house_floor_regex="<li>楼层: 第(\d+)层/总(\d+)层</li>"
self.house_totalarea_regex="<li>面积: (\d+) ㎡</li>"
self.house_totalarea_regex_qiu="(\d+)㎡"
self.house_type_regex3="<li>户型: (.*)</li>"
self.house_toward_regex="<li>朝向: (.*)</li>"
self.house_type_regex="<li>类型: (.*)</li>"
self.cityarea_regex="<li>区域:([\s\S]*?)</li>"
self.house_age_regex="<li>房龄: (\d+) 年</li>"
self.house_fitment_regex="<li>装修: (.*)</li>"
self.house_support_regex="<li>配置: (.*) </li>"
self.house_price_regex="<li>售价: <span>(.*)</span>.*</li>"
self.house_price_regex_2="<li>租金: <span>(.*)</span>.*</li>"
self.borough_name_regex="<li>小区:(.*)</li>"
self.house_deposit_regex="<li>租金: (.*)</li>"
self.house_price_regex_zu = "<li>期望租金: (.*)</li>"
self.borough_name_regex_reg = "<li>期望小区: (.*)</li>"
self.house_addr_regex_reg = "<li>小区地址:(.*)</li>"
self.house_price_regex_gou = "<li>期望售价: (.*)</li>"
def __addText(self,tag, no_tail=False):
text = []
if tag.text:
text.append(tag.text)
for child in tag.getchildren():
text.append(self.__addText(child))
if not no_tail and tag.tail:
text.append(tag.tail)
return "".join(text)
def getText(self,html):
text=[]
for tag in html:
text.append(self.__addText(tag, no_tail=True))
return ' '.join([t.strip() for t in text if t.strip()])
def mayGetIt(self,page):
try:
href=PyQuery(page)("a.userHistory").attr("href")
if href==None:
return False
href="http://%s.ganji.com%s"%(self.citycode,href)
resp = urllib2.urlopen(urllib2.Request(href, None, self.header)).read()
trs=PyQuery(resp)("table.tel_list tr")
except:
return True
# print "user list-------->%s| %s"%((len(trs)-1),self.urls)
if len(trs)-1>int(self.upc):
return True
else:
return False
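    # Hedged note on the heuristic above: upc bounds the number of listings per
    # contact; a poster whose phone-history table has more than `upc` data rows
    # (len(trs) - 1 skips the header row) is treated as an agent, and the
    # listing is discarded by the callers.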
def sell(self,url):
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
if self.mayGetIt(response):
self.fd={}
return
tree = etree.HTML(response)
soup =BeautifulSoup(response)
self.fd['house_flag'] = 1
self.fd['belong']=0
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an owner-posted listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
if re.search(self.house_floor_regex, response):
house_floor=re.search(self.house_floor_regex, response).group(1)
house_topfloor=re.search(self.house_floor_regex, response).group(2)
self.fd['house_floor'] = house_floor
self.fd['house_topfloor'] = house_topfloor
else:
self.fd['house_floor'] = None
self.fd['house_topfloor'] = None
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        # property type
if re.search(self.house_type_regex, response):
house_type=re.search(self.house_type_regex, response).group(1)
self.fd['house_type'] = housetype(house_type)
else:
self.fd['house_type'] = None
        if re.search(self.house_price_regex, response):
            house_price=re.search(self.house_price_regex, response).group(1)
            if house_price=="面议": # "negotiable"
                house_price="0"
            self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # estate (xiaoqu) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
            # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def buy(self,url):
self.fd['city'] = self.citycode
self.fd['house_flag'] = 3
# self.fd['belong']="1"
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
if self.mayGetIt(response):
self.fd={}
return
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an owner-posted listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
self.fd['house_type'] = 0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
if re.search(self.house_totalarea_regex_qiu, response):
house_totalarea=re.search(self.house_totalarea_regex_qiu, response).group(1)
self.fd['house_totalarea'] = house_totalarea
self.fd['house_totalarea_max'] = house_totalarea
self.fd['house_totalarea_min'] = house_totalarea
else:
self.fd['house_totalarea'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
if re.search(self.house_price_regex_gou, response):
house_price_zu = re.search(self.house_price_regex_gou, response).group(1)
house_price_zu = house_price_zu.replace('万','')
if house_price_zu.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price_zu.replace('以上','')
self.fd['house_price'] = self.fd['house_price_min']
elif house_price_zu.find("以下") != -1:
self.fd['house_price_max'] = house_price_zu.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = self.fd['house_price_max']
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # estate (xiaoqu) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
        # district
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def rent(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
if self.mayGetIt(response):
self.fd={}
return
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 2
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an owner-posted listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        if re.search(self.house_price_regex_2, response):
            house_price=re.search(self.house_price_regex_2, response).group(1)
            if house_price=="面议": # "negotiable"
                house_price="0"
            self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
# house_price=tree.xpath("/html/body/div[2]/div/div/ul/li/span") and tree.xpath("/html/body/div[2]/div/div/ul/li/span")[0].text.strip() or None
# v['house_price'] = house_price
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # estate (xiaoqu) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
            # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
if re.search(self.house_deposit_regex, response):
house_deposit=re.search(self.house_deposit_regex, response).group(1)
self.fd['house_deposit'] = deposit(house_deposit)
else:
self.fd['house_deposit'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def require(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
if self.mayGetIt(response):
self.fd={}
return
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 4
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
self.fd['house_totalarea']=0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
self.fd['house_deposit'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
self.fd['house_totalarea'] = 0
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an owner-posted listing: return
if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search(self.house_price_regex_zu, response):
house_price_zu = re.search(self.house_price_regex_zu, response).group(1)
house_price_zu = house_price_zu.replace('元/月','')
if house_price_zu.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price_zu.replace('以上','')
self.fd['house_price'] = house_price_zu.replace('以上','')
elif house_price_zu.find("以下") != -1:
self.fd['house_price_max'] = house_price_zu.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = house_price_zu.replace('以下','')
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # estate (xiaoqu) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
        # district
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def extractDict(self):
if checkPath(homepath,self.folder,self.urls):
pass
else:
try:
if self.kind=="1":
self.sell(self.urls)
elif self.kind=="2":
self.rent(self.urls)
elif self.kind=="3":
self.buy(self.urls)
else:
self.require(self.urls)
makePath(homepath,self.folder,self.urls)
                # skip listings older than seven days
# if (time.time() -self.fd["posttime"]) > 7*24*36000:return
except Exception,e:
msglogger.info("%s 链接采集异常"%self.urls)
# print "%s||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"%self.urls
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 1
self.fd["web_flag"] = "gj"
print "%s %s %s %s %s"%(("%s.soufun.com"% self.citycode),self.citycode, self.kind ,time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())), self.urls)
return self.fd
if not self.fd["is_checked"]:
for i in self.fd.items():
print i[0],i[1]
print "*"*80
# if len(self.fd)==7 or len(self.fd)==17:
# print "#####################################"
# continue
# req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
# p=self.br.open(req).read().strip()
# print p.decode('gbk')
# print "*"*80
class fetchData(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,self.d["citycode"],self.d["kind"])
cc.extractDict()
class getLinksThread(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
gc.enable()
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
lc.runme()
del gc.garbage[:]
def getLinks(d):
lc=LinkCrawl(d["citycode"],d["kind"],d["st1"])
while True:
lc.runme()
del gc.garbage[:]
time.sleep(int(d["st2"]))
def getContent(clinks,citycode,kind,upc):
# return
cc=ContentCrawl(clinks,citycode,kind,upc)
fd=cc.extractDict()
res=""
try:
res=postHost(fd)
except Exception,e:
res=e
print res
msglogger.info("%s|%s|%s"%(clinks,res,fd))
del gc.garbage[:]
if __name__=="__main__":
# lc=LinkCrawl(citycode="su",kind="1")
# lc.runme()#
#url1 = "http://su.ganji.com/fang5/11071015_233901.htm"
#url2 = "http://su.ganji.com/fang1/11071017_418972.htm"
#url3 = "http://su.ganji.com/fang4/11062413_4152.htm"
#url4 = "http://su.ganji.com/fang2/11070900_21214.htm"
cc=ContentCrawl("http://su.ganji.com/fang2/11071417_21820.htm",citycode="su",kind="4")
cc.extractDict()
# while 1:
# for i in range(1,5):
# k = "%s" % str(i)
# try:
# lc=LinkCrawl(citycode="su",kind=k)
# clinks=lc.runme()
# cc=ContentCrawl(clinks,citycode="su",kind=k)
# cc.extractDict()
# except:
# pass
| ptphp/PyLib | src/webpy1/src/jjrspider/ganji.py | Python | apache-2.0 | 40,700 |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del user command."""
import pwd
import os
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelUser(TestBrokerCommand):
def test_100_del_current_user(self):
pwrec = pwd.getpwuid(os.getuid())
self.noouttest(["del_user", "--username", pwrec[0]])
def test_105_verify_gone(self):
pwrec = pwd.getpwuid(os.getuid())
command = ["show_user", "--username", pwrec[0]]
out = self.notfoundtest(command)
self.matchoutput(out, "User %s not found." % pwrec[0], command)
def test_110_del_current_user_again(self):
pwrec = pwd.getpwuid(os.getuid())
command = ["del_user", "--username", pwrec[0]]
out = self.notfoundtest(command)
self.matchoutput(out, "User %s not found." % pwrec[0], command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelUser)
unittest.TextTestRunner(verbosity=2).run(suite)
| guillaume-philippon/aquilon | tests/broker/test_del_user.py | Python | apache-2.0 | 1,774 |
input = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""
output = """
supp(B) :- on(B,table,0).
supp(B) :- on(B,B1,0), supp(B1).
on(b0,table,0) :- true.
on(b1,b0,0) :- true.
on(B,L,0) | -on(B,L,0) :- block(B), location(L).
true.
location(L) :- block(L).
location(table) :- true.
block(b0).
block(b1).
block(b2).
"""
| veltri/DLV2 | tests/parser/grounding.7.test.py | Python | apache-2.0 | 559 |
import re
import calendar
from logs_analyzer.settings import *
from logs_analyzer.validators import *
from datetime import datetime
def get_service_settings(service_name):
"""
Get default settings for the said service
:param service_name: service name (example: nginx, apache2...)
    :return: dict of settings for the service
    :raises Exception: if the service name is unknown
"""
if service_name in SERVICES_SWITCHER:
return SERVICES_SWITCHER.get(service_name)
else:
raise Exception("Service \""+service_name+"\" doesn't exists!")
def get_date_filter(settings, minute=datetime.now().minute, hour=datetime.now().hour,
day=datetime.now().day, month=datetime.now().month,
year=datetime.now().year):
"""
Get the date pattern that can be used to filter data from logs based on the params
:raises Exception:
:param settings: dict
:param minute: int
:param hour: int
:param day: int
:param month: int
:param year: int
:return: string
"""
if not is_valid_year(year) or not is_valid_month(month) or not is_valid_day(day) \
or not is_valid_hour(hour) or not is_valid_minute(minute):
raise Exception("Date elements aren't valid")
if minute != '*' and hour != '*':
date_format = settings['dateminutes_format']
date_filter = datetime(year, month, day, hour, minute).strftime(date_format)
elif minute == '*' and hour != '*':
date_format = settings['datehours_format']
date_filter = datetime(year, month, day, hour).strftime(date_format)
elif minute == '*' and hour == '*':
date_format = settings['datedays_format']
date_filter = datetime(year, month, day).strftime(date_format)
else:
raise Exception("Date elements aren't valid")
return date_filter
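# Hedged illustration with made-up strftime formats (the real per-service
# formats live in logs_analyzer.settings):
#   _fmt = {'dateminutes_format': '%d/%b/%Y:%H:%M',
#           'datehours_format': '%d/%b/%Y:%H',
#           'datedays_format': '%d/%b/%Y'}
#   get_date_filter(_fmt, minute=5, hour=13, day=1, month=7, year=2016)
#     -> '01/Jul/2016:13:05'
#   get_date_filter(_fmt, minute='*', hour=13, day=1, month=7, year=2016)
#     -> '01/Jul/2016:13'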
def filter_data(log_filter, data=None, filepath=None, is_casesensitive=True, is_regex=False, is_reverse=False):
"""
Filter received data/file content and return the results
:except IOError:
:except EnvironmentError:
:raises Exception:
:param log_filter: string
:param data: string
:param filepath: string
:param is_casesensitive: boolean
:param is_regex: boolean
:param is_reverse: boolean to inverse selection
:return: string
"""
return_data = ""
if filepath:
try:
with open(filepath, 'r') as file_object:
for line in file_object:
if check_match(line, log_filter, is_regex, is_casesensitive, is_reverse):
return_data += line
return return_data
except (IOError, EnvironmentError) as e:
print(e.strerror)
exit(2)
elif data:
for line in data.splitlines():
if check_match(line, log_filter, is_regex, is_casesensitive, is_reverse):
return_data += line+"\n"
return return_data
else:
raise Exception("Data and filepath values are NULL!")
def check_match(line, filter_pattern, is_regex, is_casesensitive, is_reverse):
"""
Check if line contains/matches filter pattern
:param line: string
:param filter_pattern: string
:param is_regex: boolean
:param is_casesensitive: boolean
:param is_reverse: boolean
:return: boolean
"""
if is_regex:
check_result = re.match(filter_pattern, line) if is_casesensitive \
else re.match(filter_pattern, line, re.IGNORECASE)
else:
check_result = (filter_pattern in line) if is_casesensitive else (filter_pattern.lower() in line.lower())
    # XOR with is_reverse: a reverse filter keeps exactly the non-matching lines
    return bool(check_result) != is_reverse
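# Hedged illustration of check_match semantics (lines and patterns are synthetic):
#   check_match('GET /index HTTP/1.1', 'get /', False, False, False)  -> True
#       (case-insensitive substring match)
#   check_match('GET /index HTTP/1.1', 'get /', False, False, True)   -> False
#       (same match, selection inverted by is_reverse)
#   check_match('GET /index HTTP/1.1', r'^POST', True, True, False)   -> False
#       (re.match anchors the pattern at the start of the line)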
def get_web_requests(data, pattern, date_pattern=None, date_keys=None):
"""
Analyze data (from the logs) and return list of requests formatted as the model (pattern) defined.
:param data: string
:param pattern: string
:param date_pattern: regex|None
:param date_keys: dict|None
:return: list
"""
if date_pattern and not date_keys:
raise Exception("date_keys is not defined")
requests_dict = re.findall(pattern, data)
requests = []
for request_tuple in requests_dict:
if date_pattern:
str_datetime = __get_iso_datetime(request_tuple[1], date_pattern, date_keys)
else:
str_datetime = request_tuple[1]
requests.append({'IP': request_tuple[0], 'DATETIME': str_datetime,
'METHOD': request_tuple[2], 'ROUTE': request_tuple[3], 'CODE': request_tuple[4],
'REFERRER': request_tuple[5], 'USERAGENT': request_tuple[6]})
return requests
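# Hedged example of one element of the returned list (field values are
# synthetic; the actual capture groups come from the service's request_model
# pattern in logs_analyzer.settings):
#   {'IP': '198.51.100.4', 'DATETIME': '2016-07-01 13:05:42', 'METHOD': 'GET',
#    'ROUTE': '/index.html', 'CODE': '200', 'REFERRER': '-',
#    'USERAGENT': 'Mozilla/5.0'}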
def get_auth_requests(data, pattern, date_pattern=None, date_keys=None):
"""
Analyze data (from the logs) and return list of auth requests formatted as the model (pattern) defined.
:param data: string
:param pattern: string
:param date_pattern:
:param date_keys:
:return: list of dicts
"""
requests_dict = re.findall(pattern, data)
requests = []
for request_tuple in requests_dict:
if date_pattern:
str_datetime = __get_iso_datetime(request_tuple[0], date_pattern, date_keys)
else:
str_datetime = request_tuple[0]
data = analyze_auth_request(request_tuple[2])
data['DATETIME'] = str_datetime
data['SERVICE'] = request_tuple[1]
requests.append(data)
return requests
def analyze_auth_request(request_info):
"""
Analyze request info and returns main data (IP, invalid user, invalid password's user, is_preauth, is_closed)
:param request_info: string
:return: dicts
"""
ipv4 = re.findall(IPv4_REGEX, request_info)
is_preauth = '[preauth]' in request_info.lower()
invalid_user = re.findall(AUTH_USER_INVALID_USER, request_info)
invalid_pass_user = re.findall(AUTH_PASS_INVALID_USER, request_info)
is_closed = 'connection closed by ' in request_info.lower()
return {'IP': ipv4[0] if ipv4 else None,
'INVALID_USER': invalid_user[0] if invalid_user else None,
'INVALID_PASS_USER': invalid_pass_user[0] if invalid_pass_user else None,
'IS_PREAUTH': is_preauth,
'IS_CLOSED': is_closed}
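# Hedged example on a synthetic sshd message (exact captures depend on the
# AUTH_* patterns imported from the settings/validators modules):
#   analyze_auth_request('Invalid user admin from 203.0.113.7 port 53222 [preauth]')
#   might yield {'IP': '203.0.113.7', 'INVALID_USER': 'admin',
#                'INVALID_PASS_USER': None, 'IS_PREAUTH': True, 'IS_CLOSED': False}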
def __get_iso_datetime(str_date, pattern, keys):
"""
Change raw datetime from logs to ISO 8601 format.
:param str_date: string
:param pattern: regex (date_pattern from settings)
:param keys: dict (date_keys from settings)
:return: string
"""
months_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
a_date = re.findall(pattern, str_date)[0]
d_datetime = datetime(int(a_date[keys['year']]) if 'year' in keys else __get_auth_year(),
months_dict[a_date[keys['month']]], int(a_date[keys['day']].strip()),
int(a_date[keys['hour']]), int(a_date[keys['minute']]), int(a_date[keys['second']]))
return d_datetime.isoformat(' ')
def __get_auth_year():
# TODO: Add support for analysis done in different terms
"""
    Return the year in which the requests happened, so an analysis run on New
    Year's Eve doesn't attribute the previous hour's logs to the wrong year
    (the library is designed for hourly analysis).
:return: int
"""
if datetime.now().month == 1 and datetime.now().day == 1 and datetime.now().hour == 0:
return datetime.now().year - 1
else:
return datetime.now().year
class LogsAnalyzer:
def __init__(self, service, data=None, filepath=None):
"""
Constructor, define service (nginx, apache2...), set data or filepath if needed
:param service: string: service name (nginx, apache2...)
:param data: string: data to be filtered if not from a file
:param filepath: string: file path from which the data will be loaded if data isn't defined
and you are not using the default service logs filepath
:return:
"""
self.__filters = []
self.__settings = get_service_settings(service)
self.data = data
if filepath:
self.filepath = filepath
else:
self.filepath = self.__settings['dir_path']+self.__settings['accesslog_filename']
def add_filter(self, filter_pattern, is_casesensitive=True, is_regex=False, is_reverse=False):
"""
        Add a filter to the filters list
        :param filter_pattern: string
:param is_casesensitive: boolean
:param is_regex: boolean
:param is_reverse: boolean
:return:
"""
self.__filters.append({
'filter_pattern': filter_pattern,
'is_casesensitive': is_casesensitive,
'is_regex': is_regex,
'is_reverse': is_reverse
})
def add_date_filter(self, minute=datetime.now().minute, hour=datetime.now().hour,
day=datetime.now().day, month=datetime.now().month, year=datetime.now().year):
"""
Set datetime filter
:param minute: int
:param hour: int
:param day: int
:param month: int
:param year: int
"""
date_filter = get_date_filter(self.__settings, minute, hour, day, month, year)
self.add_filter(date_filter)
def get_all_filters(self):
"""
return all defined filters
:return: List
"""
return self.__filters
def get_filter(self, index):
"""
Get a filter data by index
:param index:
:return: Dictionary
"""
return self.__filters[index]
def remove_filter(self, index):
"""
        Remove one filter from the filters list using its index
        :param index:
        :return:
        """
        del self.__filters[index]
def clear_all_filters(self):
"""
Clear all filters
:return:
"""
self.__filters = []
def check_all_matches(self, line, filter_patterns):
"""
Check if line contains/matches all filter patterns
:param line: String
        :param filter_patterns: list of filter dicts, as stored by add_filter()
:return: boolean
"""
to_return = None
for pattern_data in filter_patterns:
tmp_result = check_match(line=line, **pattern_data)
to_return = tmp_result if to_return is None else (tmp_result and to_return)
return to_return
def filter_all(self):
"""
Apply all defined patterns and return filtered data
:return: string
"""
to_return = ""
if self.data:
for line in self.data.splitlines():
if self.check_all_matches(line, self.__filters):
to_return += line+"\n"
else:
with open(self.filepath, 'r') as file_object:
for line in file_object:
if self.check_all_matches(line, self.__filters):
to_return += line
return to_return
def get_requests(self):
"""
        Analyze data (from the logs) and return a list of requests (web or
        auth, depending on the service type) formatted per the defined pattern.
:return:
"""
data = self.filter_all()
request_pattern = self.__settings['request_model']
date_pattern = self.__settings['date_pattern']
date_keys = self.__settings['date_keys']
if self.__settings['type'] == 'web0':
return get_web_requests(data, request_pattern, date_pattern, date_keys)
elif self.__settings['type'] == 'auth':
return get_auth_requests(data, request_pattern, date_pattern, date_keys)
else:
return None
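if __name__ == '__main__':
    # Hedged usage sketch: assumes SERVICES_SWITCHER defines an 'nginx' entry
    # and that 'sample.log' holds access-log lines in that service's format.
    analyzer = LogsAnalyzer('nginx', filepath='sample.log')
    analyzer.add_filter('404')                       # plain substring filter
    analyzer.add_filter(IPv4_REGEX, is_regex=True)   # keep lines with an IPv4
    print(analyzer.filter_all())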
| ddalu5/logs-analyzer | logs_analyzer/lib.py | Python | apache-2.0 | 11,644 |
import os
from contextlib import contextmanager
from OpenSSL import crypto, SSL
import synapse.common as s_common
from synapse.tests.common import *
import synapse.lib.certdir as s_certdir
class CertDirTest(SynTest):
@contextmanager
def getCertDir(self):
'''
Get a test CertDir object.
Yields:
s_certdir.CertDir: A certdir object based out of a temp directory.
'''
# create a temp folder and make it a cert dir
with self.getTestDir() as dirname:
s_scope.set('testdir', dirname)
cdir = s_certdir.CertDir(path=dirname)
yield cdir
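    # Hedged usage sketch (mirrors how the tests below consume the fixture):
    #   with self.getCertDir() as cdir:
    #       cdir.genCaCert('myca')
    #       self.true(cdir.isCaCert('myca'))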
def basic_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
self.nn(cert)
self.nn(key)
# Make sure the certs were generated with the expected number of bits
self.eq(cert.get_pubkey().bits(), cdir.crypto_numbits)
self.eq(key.bits(), cdir.crypto_numbits)
# Make sure the certs were generated with the correct version number
self.eq(cert.get_version(), 2)
# ensure we can sign / verify data with our keypair
buf = b'The quick brown fox jumps over the lazy dog.'
sig = crypto.sign(key, buf, 'sha256')
sig2 = crypto.sign(key, buf + b'wut', 'sha256')
self.none(crypto.verify(cert, sig, buf, 'sha256'))
self.raises(crypto.Error, crypto.verify, cert, sig2, buf, 'sha256')
# ensure that a ssl context using both cert/key match
sslcontext = SSL.Context(SSL.TLSv1_2_METHOD)
sslcontext.use_certificate(cert)
sslcontext.use_privatekey(key)
self.none(sslcontext.check_privatekey())
if cacert:
# Make sure the cert was signed by the CA
self.eq(cert.get_issuer().der(), cacert.get_subject().der())
store = crypto.X509Store()
ctx = crypto.X509StoreContext(store, cert)
# OpenSSL should NOT be able to verify the certificate if its CA is not loaded
store.add_cert(cert)
self.raises(crypto.X509StoreContextError, ctx.verify_certificate) # unable to get local issuer certificate
# Generate a separate CA that did not sign the certificate
try:
cdir.genCaCert('otherca')
except DupFileName:
pass
# OpenSSL should NOT be able to verify the certificate if its CA is not loaded
store.add_cert(cdir.getCaCert('otherca'))
self.raises(crypto.X509StoreContextError, ctx.verify_certificate) # unable to get local issuer certificate
# OpenSSL should be able to verify the certificate, once its CA is loaded
store.add_cert(cacert)
self.none(ctx.verify_certificate()) # valid
def p12_assertions(self, cdir, cert, key, p12, cacert=None):
'''
test basic p12 certificate bundle assumptions
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
p12 (crypto.PKCS12): PKCS12 object to test
cacert (crypto.X509): Corresponding CA cert (optional)
'''
self.nn(p12)
# Pull out the CA cert and keypair data
p12_cacert = None
if cacert:
p12_cacert = p12.get_ca_certificates()
self.nn(p12_cacert)
self.len(1, p12_cacert)
p12_cacert = p12_cacert[0]
self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cacert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cacert))
p12_cert = p12.get_certificate()
p12_key = p12.get_privatekey()
self.basic_assertions(cdir, p12_cert, p12_key, cacert=p12_cacert)
# Make sure that the CA cert and keypair files are the same as the CA cert and keypair contained in the p12 file
self.eq(crypto.dump_certificate(crypto.FILETYPE_ASN1, cert), crypto.dump_certificate(crypto.FILETYPE_ASN1, p12_cert))
self.eq(crypto.dump_privatekey(crypto.FILETYPE_ASN1, key), crypto.dump_privatekey(crypto.FILETYPE_ASN1, p12_key))
def user_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'client')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.notin(b'subjectAltName', exts)
def host_assertions(self, cdir, cert, key, cacert=None):
'''
test basic certificate assumptions for a host certificate
Args:
cdir (s_certdir.CertDir): certdir object
cert (crypto.X509): Cert to test
key (crypto.PKey): Key for the certification
cacert (crypto.X509): Corresponding CA cert (optional)
'''
nextensions = cert.get_extension_count()
exts = {ext.get_short_name(): ext.get_data() for ext in [cert.get_extension(i) for i in range(nextensions)]}
nscertext = crypto.X509Extension(b'nsCertType', False, b'server')
keyuseext = crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment')
extkeyuseext = crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth')
basicconext = crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')
self.eq(exts[b'nsCertType'], nscertext.get_data())
self.eq(exts[b'keyUsage'], keyuseext.get_data())
self.eq(exts[b'extendedKeyUsage'], extkeyuseext.get_data())
self.eq(exts[b'basicConstraints'], basicconext.get_data())
self.isin(b'subjectAltName', exts)
def test_certdir_cas(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
inter_name = 'testsyn-intermed'
base = cdir._getPathJoin()
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getCaCert(caname))
self.none(cdir.getCaKey(caname))
self.false(cdir.isCaCert(caname))
self.none(cdir.getCaCertPath(caname))
self.none(cdir.getCaKeyPath(caname))
# Generate a self-signed CA =======================================
cdir.genCaCert(caname)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getCaCert(caname), crypto.X509)
self.isinstance(cdir.getCaKey(caname), crypto.PKey)
self.true(cdir.isCaCert(caname))
self.eq(cdir.getCaCertPath(caname), base + '/cas/' + caname + '.crt')
self.eq(cdir.getCaKeyPath(caname), base + '/cas/' + caname + '.key')
# Run basic assertions on the CA keypair
cacert = cdir.getCaCert(caname)
cakey = cdir.getCaKey(caname)
self.basic_assertions(cdir, cacert, cakey)
# Generate intermediate CA ========================================
cdir.genCaCert(inter_name, signas=caname)
# Run basic assertions, make sure that it was signed by the root CA
inter_cacert = cdir.getCaCert(inter_name)
inter_cakey = cdir.getCaKey(inter_name)
self.basic_assertions(cdir, inter_cacert, inter_cakey, cacert=cacert)
def test_certdir_hosts(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
hostname = 'visi.vertex.link'
hostname_unsigned = 'unsigned.vertex.link'
base = cdir._getPathJoin()
cdir.genCaCert(caname)
cacert = cdir.getCaCert(caname)
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getHostCert(hostname_unsigned))
self.none(cdir.getHostKey(hostname_unsigned))
self.false(cdir.isHostCert(hostname_unsigned))
self.none(cdir.getHostCertPath(hostname_unsigned))
self.none(cdir.getHostKeyPath(hostname_unsigned))
self.none(cdir.getHostCaPath(hostname_unsigned))
# Generate a self-signed host keypair =============================
cdir.genHostCert(hostname_unsigned)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getHostCert(hostname_unsigned), crypto.X509)
self.isinstance(cdir.getHostKey(hostname_unsigned), crypto.PKey)
self.true(cdir.isHostCert(hostname_unsigned))
self.eq(cdir.getHostCertPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.crt')
self.eq(cdir.getHostKeyPath(hostname_unsigned), base + '/hosts/' + hostname_unsigned + '.key')
self.none(cdir.getHostCaPath(hostname_unsigned)) # the cert is self-signed, so there is no ca cert
# Run basic assertions on the host keypair
cert = cdir.getHostCert(hostname_unsigned)
key = cdir.getHostKey(hostname_unsigned)
self.basic_assertions(cdir, cert, key)
self.host_assertions(cdir, cert, key)
# Generate a signed host keypair ==================================
cdir.genHostCert(hostname, signas=caname)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getHostCert(hostname), crypto.X509)
self.isinstance(cdir.getHostKey(hostname), crypto.PKey)
self.true(cdir.isHostCert(hostname))
self.eq(cdir.getHostCertPath(hostname), base + '/hosts/' + hostname + '.crt')
self.eq(cdir.getHostKeyPath(hostname), base + '/hosts/' + hostname + '.key')
self.eq(cdir.getHostCaPath(hostname), base + '/cas/' + caname + '.crt') # the cert is signed, so there is a ca cert
# Run basic assertions on the host keypair
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.basic_assertions(cdir, cert, key, cacert=cacert)
self.host_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_users(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
username = '[email protected]'
username_unsigned = '[email protected]'
base = cdir._getPathJoin()
cdir.genCaCert(caname)
cacert = cdir.getCaCert(caname)
# Test that all the methods for loading the certificates return correct values for non-existant files
self.none(cdir.getUserCert(username_unsigned))
self.none(cdir.getUserKey(username_unsigned))
self.none(cdir.getClientCert(username_unsigned))
self.false(cdir.isUserCert(username_unsigned))
self.false(cdir.isClientCert(username_unsigned))
self.none(cdir.getUserCertPath('nope'))
self.none(cdir.getUserKeyPath('nope'))
self.none(cdir.getUserCaPath('nope'))
self.none(cdir.getUserForHost('nope', 'host.vertex.link'))
# Generate a self-signed user keypair =============================
cdir.genUserCert(username_unsigned)
self.raises(NoSuchFile, cdir.genClientCert, username_unsigned)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getUserCert(username_unsigned), crypto.X509)
self.isinstance(cdir.getUserKey(username_unsigned), crypto.PKey)
self.none(cdir.getClientCert(username_unsigned))
self.true(cdir.isUserCert(username_unsigned))
self.false(cdir.isClientCert(username_unsigned))
self.eq(cdir.getUserCertPath(username_unsigned), base + '/users/' + username_unsigned + '.crt')
self.eq(cdir.getUserKeyPath(username_unsigned), base + '/users/' + username_unsigned + '.key')
self.none(cdir.getUserCaPath(username_unsigned)) # no CA
self.eq(cdir.getUserForHost('unsigned', 'host.vertex.link'), username_unsigned)
# Run basic assertions on the host keypair
cert = cdir.getUserCert(username_unsigned)
key = cdir.getUserKey(username_unsigned)
self.basic_assertions(cdir, cert, key)
self.user_assertions(cdir, cert, key)
# Generate a signed user keypair ==================================
cdir.genUserCert(username, signas=caname)
cdir.genClientCert(username)
# Test that all the methods for loading the certificates work
self.isinstance(cdir.getUserCert(username), crypto.X509)
self.isinstance(cdir.getUserKey(username), crypto.PKey)
self.isinstance(cdir.getClientCert(username), crypto.PKCS12)
self.true(cdir.isUserCert(username))
self.true(cdir.isClientCert(username))
self.eq(cdir.getUserCertPath(username), base + '/users/' + username + '.crt')
self.eq(cdir.getUserKeyPath(username), base + '/users/' + username + '.key')
self.eq(cdir.getUserCaPath(username), base + '/cas/' + caname + '.crt')
self.eq(cdir.getUserForHost('visi', 'host.vertex.link'), username)
# Run basic assertions on the host keypair
cert = cdir.getUserCert(username)
key = cdir.getUserKey(username)
p12 = cdir.getClientCert(username)
self.basic_assertions(cdir, cert, key, cacert=cacert)
self.user_assertions(cdir, cert, key, cacert=cacert)
self.p12_assertions(cdir, cert, key, p12, cacert=cacert)
# Test missing files for generating a client cert
os.remove(base + '/users/' + username + '.key')
self.raises(NoSuchFile, cdir.genClientCert, username) # user key
os.remove(base + '/cas/' + caname + '.crt')
self.raises(NoSuchFile, cdir.genClientCert, username) # ca crt
os.remove(base + '/users/' + username + '.crt')
self.raises(NoSuchFile, cdir.genClientCert, username) # user crt
def test_certdir_hosts_sans(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
cdir.genCaCert(caname)
# Host cert with multiple SANs ====================================
hostname = 'visi.vertex.link'
sans = 'DNS:vertex.link,DNS:visi.vertex.link,DNS:vertex.link'
cdir.genHostCert(hostname, signas=caname, sans=sans)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x1f\x82\x0bvertex.link\x82\x10visi.vertex.link') # ASN.1 encoded subjectAltName data
# Host cert with no specified SANs ================================
hostname = 'visi2.vertex.link'
cdir.genHostCert(hostname, signas=caname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi2.vertex.link') # ASN.1 encoded subjectAltName data
# Self-signed Host cert with no specified SANs ====================
hostname = 'visi3.vertex.link'
cdir.genHostCert(hostname)
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.eq(cert.get_extension_count(), 5)
self.eq(cert.get_extension(4).get_short_name(), b'subjectAltName')
self.eq(cert.get_extension(4).get_data(), b'0\x13\x82\x11visi3.vertex.link') # ASN.1 encoded subjectAltName data
def test_certdir_hosts_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
hostname = 'visi.vertex.link'
# Generate CA cert and host CSR
cdir.genCaCert(caname)
cdir.genHostCsr(hostname)
path = cdir._getPathJoin('hosts', hostname + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signHostCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getHostCert(hostname)
key = cdir.getHostKey(hostname)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_users_csr(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
caname = 'syntest'
username = '[email protected]'
# Generate CA cert and user CSR
cdir.genCaCert(caname)
cdir.genUserCsr(username)
path = cdir._getPathJoin('users', username + '.csr')
xcsr = cdir._loadCsrPath(path)
# Sign the CSR as the CA
pkey, pcert = cdir.signUserCsr(xcsr, caname)
self.isinstance(pkey, crypto.PKey)
self.isinstance(pcert, crypto.X509)
# Validate the keypair
cacert = cdir.getCaCert(caname)
cert = cdir.getUserCert(username)
key = cdir.getUserKey(username)
self.basic_assertions(cdir, cert, key, cacert=cacert)
def test_certdir_importfile(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
with self.getTestDir() as testpath:
# File doesn't exist
fpath = s_common.genpath(testpath, 'not_real.crt')
self.raises(NoSuchFile, cdir.importFile, fpath, 'cas')
# File has unsupported extension
fpath = s_common.genpath(testpath, 'coolpic.bmp')
with s_common.genfile(fpath) as fd:
self.raises(BadFileExt, cdir.importFile, fpath, 'cas')
tests = (
('cas', 'coolca.crt'),
('cas', 'coolca.key'),
('hosts', 'coolhost.crt'),
('hosts', 'coolhost.key'),
('users', 'cooluser.crt'),
('users', 'cooluser.key'),
('users', 'cooluser.p12'),
)
data = b'arbitrary data'
for ftype, fname in tests:
srcpath = s_common.genpath(testpath, fname)
dstpath = s_common.genpath(cdir.path, ftype, fname)
with s_common.genfile(srcpath) as fd:
                    fd.write(data)
fd.seek(0)
# Make sure the file is not there
self.raises(NoSuchFile, s_common.reqfile, dstpath)
# Import it and make sure it exists
self.none(cdir.importFile(srcpath, ftype))
with s_common.reqfile(dstpath) as dstfd:
                    self.eq(dstfd.read(), data)
# Make sure it can't be overwritten
self.raises(FileExists, cdir.importFile, srcpath, ftype)
def test_certdir_valUserCert(self):
with self.getCertDir() as cdir: # type: s_certdir.CertDir
base = cdir._getPathJoin()
cdir.genCaCert('syntest')
cdir.genCaCert('newp')
cacerts = cdir.getCaCerts()
syntestca = cdir.getCaCert('syntest')
newpca = cdir.getCaCert('newp')
self.raises(crypto.Error, cdir.valUserCert, b'')
cdir.genUserCert('cool')
path = cdir.getUserCertPath('cool')
byts = cdir._getPathBytes(path)
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts)
cdir.genUserCert('cooler', signas='syntest')
path = cdir.getUserCertPath('cooler')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(syntestca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(newpca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
cdir.genUserCert('coolest', signas='newp')
path = cdir.getUserCertPath('coolest')
byts = cdir._getPathBytes(path)
self.nn(cdir.valUserCert(byts))
self.nn(cdir.valUserCert(byts, cacerts=(newpca,)))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=(syntestca,))
self.raises(crypto.X509StoreContextError, cdir.valUserCert, byts, cacerts=())
| vivisect/synapse | synapse/tests/test_lib_certdir.py | Python | apache-2.0 | 22,334 |
##############################################################################
# Copyright 2017-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
import os
import numpy as np
from unittest.mock import patch, MagicMock, Mock
import json
from pyquil.api import Job, QVMConnection
from grove.tomography.tomography import (MAX_QUBITS_PROCESS_TOMO,
default_channel_ops)
from grove.tomography.process_tomography import (DEFAULT_PROCESS_TOMO_SETTINGS,
process_tomography_programs,
do_process_tomography, ProcessTomography,
COMPLETELY_POSITIVE)
from grove.tomography.process_tomography import (TRACE_PRESERVING)
from grove.tomography.utils import (make_histogram,
sample_bad_readout, basis_state_preps,
estimate_assignment_probs, BAD_2Q_READOUT, SEED,
EPS, CNOT_PROGRAM, import_qutip, import_cvxpy)
from grove.tomography.operator_utils import make_diagonal_povm, POVM_PI_BASIS
qt = import_qutip()
cvxpy = import_cvxpy()
if not qt:
pytest.skip("Qutip not installed, skipping tests", allow_module_level=True)
if not cvxpy:
pytest.skip("CVXPY not installed, skipping tests", allow_module_level=True)
SHOTS_PATH = os.path.join(os.path.dirname(__file__), 'process_shots.json')
RESULTS_PATH = os.path.join(os.path.dirname(__file__), 'process_results.json')
sample_bad_readout = MagicMock(sample_bad_readout)
sample_bad_readout.side_effect = [np.array(shots) for shots in json.load(open(SHOTS_PATH, 'r'))]
# these mocks are set up such that a single mock Job is returned by the QVMConnection's wait_for_job
# but calling job.result() returns a different value every time via the side_effect defined below
cxn = MagicMock(QVMConnection)
job = MagicMock(Job)
job.result.side_effect = json.load(open(RESULTS_PATH, 'r'))
cxn.wait_for_job.return_value = job
def test_process_tomography():
num_qubits = len(CNOT_PROGRAM.get_qubits())
dimension = 2 ** num_qubits
tomo_seq = list(process_tomography_programs(CNOT_PROGRAM))
nsamples = 3000
np.random.seed(SEED)
# We need more samples on the readout to ensure convergence.
state_prep_hists = [make_histogram(sample_bad_readout(p, 2 * nsamples, BAD_2Q_READOUT, cxn),
dimension) for p in basis_state_preps(*range(num_qubits))]
assignment_probs = estimate_assignment_probs(state_prep_hists)
histograms = np.zeros((len(tomo_seq), dimension))
for jj, p in enumerate(tomo_seq):
histograms[jj] = make_histogram(sample_bad_readout(p, nsamples, BAD_2Q_READOUT, cxn),
dimension)
channel_ops = list(default_channel_ops(num_qubits))
histograms = histograms.reshape((len(channel_ops), len(channel_ops), dimension))
povm = make_diagonal_povm(POVM_PI_BASIS ** num_qubits, assignment_probs)
cnot_ideal = qt.cnot()
for settings in [
DEFAULT_PROCESS_TOMO_SETTINGS,
DEFAULT_PROCESS_TOMO_SETTINGS._replace(constraints={TRACE_PRESERVING}),
DEFAULT_PROCESS_TOMO_SETTINGS._replace(constraints={TRACE_PRESERVING, COMPLETELY_POSITIVE}),
]:
process_tomo = ProcessTomography.estimate_from_ssr(histograms, povm, channel_ops,
channel_ops,
settings)
assert abs(1 - process_tomo.avg_gate_fidelity(cnot_ideal)) < EPS
transfer_matrix = process_tomo.pauli_basis.transfer_matrix(qt.to_super(cnot_ideal))
assert abs(1 - process_tomo.avg_gate_fidelity(transfer_matrix)) < EPS
chi_rep = process_tomo.to_chi().data.toarray()
# When comparing to the identity, the chi representation is quadratically larger than the
# Hilbert space representation, so we take a square root.
        probability_scale = np.sqrt(chi_rep.shape[0])
super_op_from_chi = np.zeros(process_tomo.pauli_basis.ops[0].shape, dtype=np.complex128)
for i, si in enumerate(process_tomo.pauli_basis.ops):
for j, sj in enumerate(process_tomo.pauli_basis.ops):
contribution = chi_rep[i][j] * si.data.toarray().conj().T.dot(sj.data.toarray())
                super_op_from_chi += contribution / probability_scale
assert np.isclose(np.eye(process_tomo.pauli_basis.ops[0].shape[0]), super_op_from_chi,
atol=EPS).all()
choi_rep = process_tomo.to_choi()
# Choi matrix should be a valid density matrix, scaled by the dimension of the system.
        assert np.isclose(np.trace(choi_rep.data.toarray()) / probability_scale, 1, atol=EPS)
super_op = process_tomo.to_super()
# The map should be trace preserving.
assert np.isclose(np.sum(super_op[0]), 1, atol=EPS)
kraus_ops = process_tomo.to_kraus()
assert np.isclose(sum(np.trace(k.conjugate().T.dot(k)) for k in kraus_ops),
kraus_ops[0].shape[0], atol=.1)
assert abs(1 - process_tomo.avg_gate_fidelity(qt.to_super(cnot_ideal))) < EPS
with patch("grove.tomography.utils.plot_pauli_transfer_matrix"), \
patch("grove.tomography.process_tomography.plt") as mplt:
mplt.subplots.return_value = Mock(), Mock()
process_tomo.plot()
def test_do_process_tomography():
nsamples = 3000
qubits = list(range(MAX_QUBITS_PROCESS_TOMO + 1))
# Test with too many qubits.
with pytest.raises(ValueError):
_ = do_process_tomography(CNOT_PROGRAM, nsamples,
cxn, qubits)
process_tomo, assignment_probs, histograms = do_process_tomography(CNOT_PROGRAM, nsamples, cxn)
cnot_ideal = qt.cnot()
assert abs(1 - process_tomo.avg_gate_fidelity(cnot_ideal)) < EPS
for histogram_collection in histograms:
for histogram in histogram_collection:
assert np.sum(histogram) == nsamples
num_qubits = len(CNOT_PROGRAM.get_qubits())
assert np.isclose(assignment_probs, np.eye(2 ** num_qubits), atol=EPS).all()
| rigetticomputing/grove | grove/tests/tomography/test_process_tomography.py | Python | apache-2.0 | 6,859 |
# Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferry import model
from cloudferry.model import identity
@model.type_alias('volume_attachments')
class Attachment(model.Model):
object_id = model.PrimaryKey()
server = model.Reference('cloudferry.model.compute.Server',
ensure_existence=False)
volume = model.Dependency('cloudferry.model.storage.Volume')
device = model.String(required=True)
def equals(self, other):
# pylint: disable=no-member
if super(Attachment, self).equals(other):
return True
if self.server is None:
return False
return self.server.equals(other.server) and self.device == other.device
@model.type_alias('volumes')
class Volume(model.Model):
object_id = model.PrimaryKey()
name = model.String(required=True, allow_none=True)
description = model.String(required=True, allow_none=True)
availability_zone = model.String(required=True)
encrypted = model.Boolean(missing=False)
host = model.String(required=True)
size = model.Integer(required=True)
tenant = model.Dependency(identity.Tenant, required=True)
metadata = model.Dict(missing=dict)
volume_type = model.String(required=True, allow_none=True)
| SVilgelm/CloudFerry | cloudferry/model/storage.py | Python | apache-2.0 | 1,801 |
import logging
from types import FunctionType
import ray
import ray.cloudpickle as pickle
from ray.experimental.internal_kv import _internal_kv_initialized, \
_internal_kv_get, _internal_kv_put
from ray.tune.error import TuneError
TRAINABLE_CLASS = "trainable_class"
ENV_CREATOR = "env_creator"
RLLIB_MODEL = "rllib_model"
RLLIB_PREPROCESSOR = "rllib_preprocessor"
RLLIB_ACTION_DIST = "rllib_action_dist"
TEST = "__test__"
KNOWN_CATEGORIES = [
TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR,
RLLIB_ACTION_DIST, TEST
]
logger = logging.getLogger(__name__)
def has_trainable(trainable_name):
return _global_registry.contains(TRAINABLE_CLASS, trainable_name)
def get_trainable_cls(trainable_name):
validate_trainable(trainable_name)
return _global_registry.get(TRAINABLE_CLASS, trainable_name)
def validate_trainable(trainable_name):
if not has_trainable(trainable_name):
# Make sure everything rllib-related is registered.
from ray.rllib import _register_all
_register_all()
if not has_trainable(trainable_name):
raise TuneError("Unknown trainable: " + trainable_name)
def register_trainable(name, trainable, warn=True):
"""Register a trainable function or class.
This enables a class or function to be accessed on every Ray process
in the cluster.
Args:
name (str): Name to register.
trainable (obj): Function or tune.Trainable class. Functions must
take (config, status_reporter) as arguments and will be
automatically converted into a class during registration.
"""
from ray.tune.trainable import Trainable
from ray.tune.function_runner import wrap_function
if isinstance(trainable, type):
logger.debug("Detected class for trainable.")
elif isinstance(trainable, FunctionType):
logger.debug("Detected function for trainable.")
trainable = wrap_function(trainable, warn=warn)
elif callable(trainable):
logger.info(
"Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable, warn=warn)
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertable to Trainable",
trainable)
_global_registry.register(TRAINABLE_CLASS, name, trainable)
def register_env(name, env_creator):
"""Register a custom environment for use with RLlib.
This enables the environment to be accessed on every Ray process
in the cluster.
Args:
name (str): Name to register.
env_creator (obj): Function that creates an env.
"""
if not isinstance(env_creator, FunctionType):
raise TypeError("Second argument must be a function.", env_creator)
_global_registry.register(ENV_CREATOR, name, env_creator)
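# A minimal usage sketch (illustrative; `my_trainable` and `MyEnv` are
# hypothetical user-defined names, not part of this module):
#
#   from ray import tune
#
#   def my_trainable(config):
#       tune.report(score=config["x"] ** 2)
#
#   register_trainable("my_trainable", my_trainable)
#   register_env("my_env", lambda env_config: MyEnv(env_config))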
def check_serializability(key, value):
_global_registry.register(TEST, key, value)
def _make_key(category, key):
"""Generate a binary key for the given category and key.
Args:
category (str): The category of the item
key (str): The unique identifier for the item
Returns:
        The key to use for storing the value.
"""
return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
key.encode("ascii"))
class _Registry:
def __init__(self):
self._to_flush = {}
def register(self, category, key, value):
"""Registers the value with the global registry.
Raises:
            PicklingError if unable to pickle the provided value.
"""
if category not in KNOWN_CATEGORIES:
from ray.tune import TuneError
raise TuneError("Unknown category {} not among {}".format(
category, KNOWN_CATEGORIES))
self._to_flush[(category, key)] = pickle.dumps_debug(value)
if _internal_kv_initialized():
self.flush_values()
def contains(self, category, key):
if _internal_kv_initialized():
value = _internal_kv_get(_make_key(category, key))
return value is not None
else:
return (category, key) in self._to_flush
def get(self, category, key):
if _internal_kv_initialized():
value = _internal_kv_get(_make_key(category, key))
if value is None:
raise ValueError(
"Registry value for {}/{} doesn't exist.".format(
category, key))
return pickle.loads(value)
else:
return pickle.loads(self._to_flush[(category, key)])
def flush_values(self):
for (category, key), value in self._to_flush.items():
_internal_kv_put(_make_key(category, key), value, overwrite=True)
self._to_flush.clear()
_global_registry = _Registry()
ray.worker._post_init_hooks.append(_global_registry.flush_values)
class _ParameterRegistry:
def __init__(self):
self.to_flush = {}
self.references = {}
def put(self, k, v):
self.to_flush[k] = v
if ray.is_initialized():
self.flush()
def get(self, k):
if not ray.is_initialized():
return self.to_flush[k]
return ray.get(self.references[k])
def flush(self):
for k, v in self.to_flush.items():
self.references[k] = ray.put(v)
self.to_flush.clear()
parameter_registry = _ParameterRegistry()
ray.worker._post_init_hooks.append(parameter_registry.flush)
| richardliaw/ray | python/ray/tune/registry.py | Python | apache-2.0 | 5,525 |
# Copyright (c) 2018-2021 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
from six import text_type, binary_type
from ..message import BulkFrontendMessage
class CopyData(BulkFrontendMessage):
message_id = b'd'
def __init__(self, data, unicode_error='strict'):
BulkFrontendMessage.__init__(self)
if isinstance(data, text_type):
self.bytes_ = data.encode(encoding='utf-8', errors=unicode_error)
elif isinstance(data, binary_type):
self.bytes_ = data
else:
raise TypeError("Data should be string or bytes")
def read_bytes(self):
return self.bytes_
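# A minimal usage sketch (illustrative only): text input is encoded as UTF-8,
# while bytes pass through untouched.
#
#   msg = CopyData(u'1|foo\n')      # stored as b'1|foo\n'
#   msg = CopyData(b'\x00\x01')     # used as-is
#   payload = msg.read_bytes()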
| uber/vertica-python | vertica_python/vertica/messages/frontend_messages/copy_data.py | Python | apache-2.0 | 2,393 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_cluster_role_list import V1beta1ClusterRoleList
class TestV1beta1ClusterRoleList(unittest.TestCase):
""" V1beta1ClusterRoleList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1ClusterRoleList(self):
"""
Test V1beta1ClusterRoleList
"""
model = kubernetes.client.models.v1beta1_cluster_role_list.V1beta1ClusterRoleList()
if __name__ == '__main__':
unittest.main()
| skuda/client-python | kubernetes/test/test_v1beta1_cluster_role_list.py | Python | apache-2.0 | 917 |
# -*- coding:utf-8 -*-
from test_stub import *
class HOST(MINI):
def __init__(self, uri=None, initialized=False):
self.host_name = None
self.host_list = []
if initialized:
# if initialized is True, uri should not be None
self.uri = uri
return
super(HOST, self).__init__()
def host_ops(self, host_name, action, details_page=False):
self.navigate('minihost')
host_list = []
if isinstance(host_name, types.ListType):
host_list = host_name
else:
host_list.append(host_name)
ops_list = {'enable': u'启用',
'disable': u'停用',
'reconnect': u'重连',
'maintenance': u'维护模式',
'light': u'识别灯亮'}
test_util.test_logger('Host (%s) execute action[%s]' % (' '.join(host_list), action))
for host in host_list:
for elem in self.get_elements('ant-row-flex-middle'):
if host in elem.text:
if not details_page:
if not elem.get_element(CHECKBOX).selected:
elem.get_element(CHECKBOX).click()
else:
elem.get_element('left-part').click()
time.sleep(1)
break
if details_page:
self.get_element(MOREOPERATIONBTN).click()
time.sleep(1)
self.operate(ops_list[action])
else:
if action in ['enable', 'disable']:
self.click_button(ops_list[action])
else:
self.get_element(MOREOPERATIONBTN).click()
time.sleep(1)
self.operate(ops_list[action])
self.wait_for_element(MESSAGETOAST, timeout=300, target='disappear')
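    # Hypothetical usage inside a test (host names are illustrative):
    #
    #   host = HOST()
    #   host.host_ops('host-1', 'disable')                # from the list view
    #   host.host_ops(['host-1', 'host-2'], 'reconnect')  # batch operation
    #   host.host_ops('host-1', 'maintenance', details_page=True)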
| zstackio/zstack-woodpecker | integrationtest/vm/e2e_mini/host/host.py | Python | apache-2.0 | 1,882 |
#!/usr/bin/python
"""usage: python stylechecker.py /path/to/the/c/code"""
import os
import sys
import string
import re
WHITE = '\033[97m'
CYAN = '\033[96m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def check_file(file):
    if re.search(r'\.[ch]$', file) is None:
return
f = open(file)
i = 1
file_name_printed = False
for line in f:
line = line.replace('\n', '')
# check the number of columns greater than 80
if len(line) > 80:
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (GREEN + ' [>80]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
# check the TAB key
if string.find(line, '\t') >= 0:
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (YELLOW + ' [TAB]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
# check blank lines
if line.isspace():
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (CYAN + ' [BLK]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
i = i + 1
f.close()
def walk_dir(dir):
for root, dirs, files in os.walk(dir):
for f in files:
s = root + '/' + f
check_file(s)
for d in dirs:
walk_dir(d)
walk_dir(sys.argv[1])
| izenecloud/nginx | tengine/contrib/stylechecker.py | Python | apache-2.0 | 1,596 |
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
fast = slow = head
for i in range(n):
fast = fast.next
if fast is None:
return head.next
while fast.next:
slow = slow.next
fast = fast.next
# print(slow.next, fast)
slow.next = slow.next.next
return head
def test(self):
intarrToTest = [1, 2, 3, 4, 5]
head = ListNode(intarrToTest[0])
x = head
for s in intarrToTest[1:]:
temp = ListNode(s)
x.next = temp
x = x.next
head = self.removeNthFromEnd(head, 4)
x = head
while x:
print(x.val, end=' ')
x = x.next
print()
test = Solution()
test.test()
| rx2130/Leetcode | python/19 Remove Nth Node From End of List.py | Python | apache-2.0 | 1,039 |
import datetime
from unittest import mock
import unittest
from django.utils import timezone
from unittest.mock import Mock, PropertyMock
from django.test import TestCase
from django.contrib.sessions.models import Session
# from core.tasks import send_outcome, check_anonymous
from lti.tasks import send_outcome
class CeleryTasksTest(TestCase):
@unittest.skip("skip unless fixed")
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_no_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
p = PropertyMock(return_value=3, side_effect=Session.DoesNotExist('Object Does not exist'))
type(mock_session).session = p
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
# response = check_anonymous()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@unittest.skip("skip unless fixed")
@mock.patch('mysite.celery.UserSession.objects.filter')
@mock.patch('mysite.celery.User.objects.filter')
def test_check_anonymous_user_session_has_session(self, mock_User_filter, mock_UserSession_filter):
mock_user = Mock(id=1)
call_mock_User_filter = [mock_user]
mock_session = Mock(id=2)
# user_session.session
mock_session.session.expire_date = timezone.now() - datetime.timedelta(days=1)
sess_session_del = Mock()
sess_user_del = Mock()
mock_session.session.delete = sess_session_del
mock_session.user.delete = sess_user_del
call_mock_UserSession_filter = [mock_session]
mock_User_filter.return_value = call_mock_User_filter
mock_UserSession_filter.return_value = call_mock_UserSession_filter
mock_user_del = Mock()
mock_user.delete = mock_user_del
# response = check_anonymous()
sess_session_del.assert_called_once_with()
sess_user_del.assert_called_once_with()
mock_user_del.assert_called_once_with()
mock_User_filter.assert_called_with(groups__name='Temporary')
mock_UserSession_filter.assert_called_with(user__groups__name='Temporary')
@mock.patch('lti.tasks.GradedLaunch.objects.get')
@mock.patch('lti.tasks.send_score_update')
def test_send_outcome(self, mock_send_score_update, mock_GradedLaunch_get):
get_mock_ret_val = Mock()
mock_GradedLaunch_get.return_value = get_mock_ret_val
send_outcome('0', assignment_id=1)
mock_GradedLaunch_get.assert_called_once_with(id=1)
mock_send_score_update.assert_called_once_with(get_mock_ret_val, '0')
| cjlee112/socraticqs2 | mysite/mysite/tests/celery.py | Python | apache-2.0 | 3,119 |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ValidationFailed(ValueError):
"""User input was inconsistent with API restrictions."""
def __init__(self, msg, *args, **kwargs):
msg = msg.format(*args, **kwargs)
super(ValidationFailed, self).__init__(msg)
class ValidationProgrammingError(ValueError):
"""Caller did not map validations correctly."""
def __init__(self, msg, *args, **kwargs):
msg = msg.format(*args, **kwargs)
super(ValidationProgrammingError, self).__init__(msg)
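# A minimal usage sketch (hypothetical caller): the *args/**kwargs are passed
# straight to str.format on the message template.
#
#   raise ValidationFailed('{0} must be <= {1} characters', 'name', 64)
#   # -> ValidationFailed: name must be <= 64 characters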
| obulpathi/cdn1 | cdn/transport/validators/stoplight/exceptions.py | Python | apache-2.0 | 1,077 |
#!/usr/bin/python
import sys
# try to import from the default place Munki installs it
try:
from munkilib import FoundationPlist, munkicommon
except:
sys.path.append('/usr/local/munki')
from munkilib import FoundationPlist, munkicommon
import os
from datetime import datetime
FILE_LOCATION = "/Users/Shared/.msc-dnd.plist"
# Does the file exist?
if not os.path.isfile(FILE_LOCATION):
# File isn't here, set the Munki pref to False
munkicommon.set_pref('SuppressUserNotification', False)
sys.exit(0)
# If it does, compare the saved do-not-disturb end date with the current time.
else:
plist = FoundationPlist.readPlist(FILE_LOCATION)
if 'DNDEndDate' not in plist:
# The key we need isn't in there, remove the file, set pref and exit
os.remove(FILE_LOCATION)
munkicommon.set_pref('SuppressUserNotification', False)
sys.exit(0)
else:
# Is the current date greater than the DND date?
saved_time = datetime.strptime(str(plist['DNDEndDate']), "%Y-%m-%d %H:%M:%S +0000")
current_time = datetime.now()
if saved_time < current_time:
# print "Current time is greater"
# If yes, remove the file and set the Munki pref for suppress notifications to False
os.remove(FILE_LOCATION)
munkicommon.set_pref('SuppressUserNotification', False)
sys.exit(0)
else:
# print "Saved Time is greater"
munkicommon.set_pref('SuppressUserNotification', True)
sys.exit(0)
# If no, make sure suppress notifications is True
| grahamgilbert/munki-dnd | munki-dnd.py | Python | apache-2.0 | 1,572 |
import os
from ..libs import xlrd_tools
HERE = os.path.dirname(os.path.abspath(__file__))
def test_xlsx_xlrd():
with open(os.path.join(HERE, 'fixtures', 'test.xlsx')) as fp:
headers, data = xlrd_tools.xlsx_xlrd(fp)
assert headers[0] == {'field': 'one', 'id': 'one', 'name': 'one'}
assert data[0] == {'one': 'a', 'two': 'b', 'three': 'c'}
| chrisseto/modular-file-renderer | mfr/ext/tabular/tests/test_xlsx_tools.py | Python | apache-2.0 | 362 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
from oslo_config import cfg
from oslo_log import log as logging
from conveyor.clone.drivers import driver
from conveyor.clone.resources import common
from conveyor.conveyoragentclient.v1 import client as birdiegatewayclient
from conveyor.i18n import _LE
from conveyor.i18n import _LW
from conveyor import exception
from conveyor import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class OpenstackDriver(driver.BaseDriver):
def __init__(self):
super(OpenstackDriver, self).__init__()
def handle_resources(self, context, plan_id, resource_map,
sys_clone, copy_data):
LOG.debug('Begin handle resources')
undo_mgr = utils.UndoManager()
try:
self._add_extra_properties(context, resource_map,
sys_clone, copy_data, undo_mgr)
# self._set_resources_state(context, resource_map)
return undo_mgr
except Exception as e:
LOG.error(_LE('Generate template of plan failed, err:%s'), str(e))
undo_mgr._rollback()
raise exception.ExportTemplateFailed(id=plan_id, msg=str(e))
def _set_resources_state(self, context, resource_map):
for key, value in resource_map.items():
resource_type = value.type
resource_id = value.id
if resource_type == 'OS::Nova::Server':
self.compute_api.reset_state(context, resource_id, 'cloning')
elif resource_type == 'OS::Cinder::Volume':
self.volume_api.reset_state(context, resource_id, 'cloning')
elif resource_type == 'OS::Heat::Stack':
self._set_resources_state_for_stack(context, value)
def _set_resources_state_for_stack(self, context, resource):
template_str = resource.properties.get('template')
template = json.loads(template_str)
def _set_state(template):
temp_res = template.get('resources')
for key, value in temp_res.items():
res_type = value.get('type')
if res_type == 'OS::Cinder::Volume':
vid = value.get('extra_properties', {}).get('id')
if vid:
self.volume_api.reset_state(context, vid, 'cloning')
elif res_type == 'OS::Nova::Server':
sid = value.get('extra_properties', {}).get('id')
if sid:
self.compute_api.reset_state(context, sid, 'cloning')
elif res_type and res_type.startswith('file://'):
son_template = value.get('content')
son_template = json.loads(son_template)
_set_state(son_template)
_set_state(template)
def add_extra_properties_for_server(self, context, resource, resource_map,
sys_clone, copy_data,
undo_mgr):
migrate_net_map = CONF.migrate_net_map
server_properties = resource.properties
server_id = resource.id
server_extra_properties = resource.extra_properties
server_az = server_properties.get('availability_zone')
vm_state = server_extra_properties.get('vm_state')
gw_url = server_extra_properties.get('gw_url')
if not gw_url:
if vm_state == 'stopped':
gw_id, gw_ip = utils.get_next_vgw(server_az)
if not gw_id or not gw_ip:
raise exception.V2vException(message='no vgw host found')
gw_url = gw_ip + ':' + str(CONF.v2vgateway_api_listen_port)
resource.extra_properties.update({"gw_url": gw_url,
"gw_id": gw_id})
resource.extra_properties['sys_clone'] = sys_clone
resource.extra_properties['is_deacidized'] = True
block_device_mapping = server_properties.get(
'block_device_mapping_v2')
if block_device_mapping:
for block_device in block_device_mapping:
volume_name = block_device.get('volume_id').get(
'get_resource')
volume_resource = resource_map.get(volume_name)
volume_resource.extra_properties['gw_url'] = gw_url
volume_resource.extra_properties['is_deacidized'] = \
True
boot_index = block_device.get('boot_index')
dev_name = block_device.get('device_name')
if boot_index == 0 or boot_index == '0':
volume_resource.extra_properties['sys_clone'] = \
sys_clone
if sys_clone:
self._handle_dv_for_svm(context,
volume_resource,
server_id, dev_name,
gw_id, gw_ip, undo_mgr)
else:
d_copy = copy_data and volume_resource. \
extra_properties['copy_data']
volume_resource.extra_properties['copy_data'] = \
d_copy
if not d_copy:
continue
self._handle_dv_for_svm(context, volume_resource,
server_id, dev_name,
gw_id, gw_ip, undo_mgr)
else:
if migrate_net_map:
# get the availability_zone of server
server_az = server_properties.get('availability_zone')
if not server_az:
                    LOG.error(_LE('Could not get the availability_zone '
                                  'of server %s') % resource.id)
raise exception.AvailabilityZoneNotFound(
server_uuid=resource.id)
migrate_net_id = migrate_net_map.get(server_az)
if not migrate_net_id:
                    LOG.error(_LE('Could not get the migrate net of server %s')
% resource.id)
raise exception.NoMigrateNetProvided(
server_uuid=resource.id)
# attach interface
LOG.debug('Attach a port of net %s to server %s',
migrate_net_id,
server_id)
obj = self.compute_api.interface_attach(context, server_id,
migrate_net_id,
None,
None)
interface_attachment = obj._info
if interface_attachment:
LOG.debug('The interface attachment info is %s '
% str(interface_attachment))
migrate_fix_ip = interface_attachment.get('fixed_ips')[0] \
.get('ip_address')
migrate_port_id = interface_attachment.get('port_id')
undo_mgr.undo_with(functools.partial
(self.compute_api.interface_detach,
context,
server_id,
migrate_port_id))
gw_url = migrate_fix_ip + ':' + str(
CONF.v2vgateway_api_listen_port)
extra_properties = {}
extra_properties['gw_url'] = gw_url
extra_properties['is_deacidized'] = True
extra_properties['migrate_port_id'] = migrate_port_id
extra_properties['sys_clone'] = sys_clone
resource.extra_properties.update(extra_properties)
# waiting port attach finished, and can ping this vm
self._await_port_status(context, migrate_port_id,
migrate_fix_ip)
# else:
# interfaces = self.neutron_api.port_list(
# context, device_id=server_id)
# host_ip = None
# for infa in interfaces:
# if host_ip:
# break
# binding_profile = infa.get("binding:profile", [])
# if binding_profile:
# host_ip = binding_profile.get('host_ip')
# if not host_ip:
# LOG.error(_LE('Not find the clone data
# ip for server'))
# raise exception.NoMigrateNetProvided(
# server_uuid=resource.id
# )
# gw_url = host_ip + ':' + str(
# CONF.v2vgateway_api_listen_port)
# extra_properties = {}
# extra_properties['gw_url'] = gw_url
# extra_properties['sys_clone'] = sys_clone
# resource.extra_properties.update(extra_properties)
block_device_mapping = server_properties.get(
'block_device_mapping_v2')
if block_device_mapping:
client = None
if gw_url:
gw_urls = gw_url.split(':')
client = birdiegatewayclient.get_birdiegateway_client(
gw_urls[0], gw_urls[1])
for block_device in block_device_mapping:
device_name = block_device.get('device_name')
volume_name = block_device.get('volume_id').get(
'get_resource')
volume_resource = resource_map.get(volume_name)
boot_index = block_device.get('boot_index')
if boot_index == 0 or boot_index == '0':
volume_resource.extra_properties['sys_clone'] = \
sys_clone
if not sys_clone:
continue
else:
d_copy = copy_data and volume_resource. \
extra_properties['copy_data']
volume_resource.extra_properties['copy_data'] = \
d_copy
if not d_copy:
continue
# need to check the vm disk name
if not client:
continue
src_dev_format = client.vservices. \
get_disk_format(device_name).get('disk_format')
src_mount_point = client. \
vservices.get_disk_mount_point(device_name). \
get('mount_point')
volume_resource.extra_properties['guest_format'] = \
src_dev_format
volume_resource.extra_properties['mount_point'] = \
src_mount_point
volume_resource.extra_properties['gw_url'] = gw_url
volume_resource.extra_properties['is_deacidized'] = \
True
sys_dev_name = client. \
vservices.get_disk_name(volume_resource.id). \
get('dev_name')
if not sys_dev_name:
sys_dev_name = device_name
volume_resource.extra_properties['sys_dev_name'] = \
sys_dev_name
def _handle_sv_for_svm(self, context, vol_res,
gw_id, gw_ip, undo_mgr):
volume_id = vol_res.id
LOG.debug('Set the volume %s shareable', volume_id)
self.volume_api.set_volume_shareable(context, volume_id, True)
undo_mgr.undo_with(functools.partial(self._set_volume_shareable,
context,
volume_id,
False))
client = birdiegatewayclient.get_birdiegateway_client(
gw_ip,
str(CONF.v2vgateway_api_listen_port)
)
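        # Snapshot the gateway's device names before attaching, then diff
        # against the post-attach snapshot below: the single new entry is the
        # device node the volume appeared on inside the gateway VM.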
disks = set(client.vservices.get_disk_name().get('dev_name'))
self.compute_api.attach_volume(context,
gw_id,
volume_id,
None)
LOG.debug('Attach the volume %s to gw host %s ', volume_id, gw_id)
undo_mgr.undo_with(functools.partial(self._detach_volume,
context,
gw_id,
volume_id))
self._wait_for_volume_status(context, volume_id, gw_id,
'in-use')
n_disks = set(client.vservices.get_disk_name().get('dev_name'))
diff_disk = n_disks - disks
LOG.debug('Begin get info for volume,the vgw ip %s' % gw_ip)
client = birdiegatewayclient.get_birdiegateway_client(
gw_ip, str(CONF.v2vgateway_api_listen_port))
# sys_dev_name = client.vservices.get_disk_name(volume_id).get(
# 'dev_name')
# sys_dev_name = device_name
# sys_dev_name = attach_resp._info.get('device')
sys_dev_name = list(diff_disk)[0] if len(diff_disk) >= 1 else None
LOG.debug("dev_name = %s", sys_dev_name)
vol_res.extra_properties['sys_dev_name'] = sys_dev_name
guest_format = client.vservices.get_disk_format(sys_dev_name) \
.get('disk_format')
if guest_format:
vol_res.extra_properties['guest_format'] = guest_format
mount_point = client.vservices.force_mount_disk(
sys_dev_name, "/opt/" + volume_id)
vol_res.extra_properties['mount_point'] = mount_point.get(
'mount_disk')
def add_extra_properties_for_stack(self, context, resource,
sys_clone, copy_data, undo_mgr):
res_prop = resource.properties
stack_id = resource.id
template = json.loads(res_prop.get('template'))
def _add_extra_prop(template, stack_id):
temp_res = template.get('resources')
for key, value in temp_res.items():
res_type = value.get('type')
if res_type == 'OS::Cinder::Volume':
# v_prop = value.get('properties')
                    v_extra_prop = value.get('extra_properties', {})
                    d_copy = copy_data and v_extra_prop['copy_data']
                    v_extra_prop['copy_data'] = d_copy
if not d_copy:
continue
                    if not v_extra_prop or not v_extra_prop.get('gw_url'):
                        phy_id = v_extra_prop.get('id')
res_info = self.volume_api.get(context, phy_id)
az = res_info.get('availability_zone')
gw_id, gw_ip = utils.get_next_vgw(az)
if not gw_id or not gw_ip:
raise exception.V2vException(
message='no vgw host found')
gw_url = gw_ip + ':' + str(
CONF.v2vgateway_api_listen_port)
                        v_extra_prop.update({"gw_url": gw_url, "gw_id": gw_id})
                        volume_status = res_info['status']
                        v_extra_prop['status'] = volume_status
                        value['extra_properties'] = v_extra_prop
value['id'] = phy_id
self._handle_volume_for_stack(context, value, gw_id,
gw_ip, undo_mgr)
elif res_type == 'OS::Nova::Server':
                    v_extra_prop = value.get('extra_properties', {})
                    phy_id = v_extra_prop.get('id')
server_info = self.compute_api.get_server(context, phy_id)
vm_state = server_info.get('OS-EXT-STS:vm_state', None)
                    v_extra_prop['vm_state'] = vm_state
                    value['extra_properties'] = v_extra_prop
_add_extra_prop(template, stack_id)
res_prop['template'] = json.dumps(template)
def _handle_volume_for_stack(self, context, vol_res,
gw_id, gw_ip, undo_mgr):
volume_id = vol_res.get('id')
volume_info = self.volume_api.get(context, volume_id)
volume_status = volume_info['status']
v_shareable = volume_info['shareable']
if not v_shareable and volume_status == 'in-use':
volume_attachments = volume_info.get('attachments', [])
vol_res.get('extra_properties')['attachments'] = volume_attachments
for attachment in volume_attachments:
server_id = attachment.get('server_id')
server_info = self.compute_api.get_server(context, server_id)
vm_state = server_info.get('OS-EXT-STS:vm_state', None)
if vm_state != 'stopped':
_msg = 'the server %s not stopped' % server_id
raise exception.V2vException(message=_msg)
device = attachment.get('device')
self.compute_api.detach_volume(context, server_id,
volume_id)
self._wait_for_volume_status(context, volume_id, server_id,
'available')
undo_mgr.undo_with(functools.partial(self._attach_volume,
context,
server_id,
volume_id,
device))
client = birdiegatewayclient.get_birdiegateway_client(
gw_ip,
str(CONF.v2vgateway_api_listen_port)
)
disks = set(client.vservices.get_disk_name().get('dev_name'))
LOG.debug('Attach volume %s to gw host %s', volume_id, gw_id)
attach_resp = self.compute_api.attach_volume(context,
gw_id,
volume_id,
None)
LOG.debug('The volume attachment info is %s '
% str(attach_resp))
undo_mgr.undo_with(functools.partial(self._detach_volume,
context,
gw_id,
volume_id))
self._wait_for_volume_status(context, volume_id, gw_id,
'in-use')
n_disks = set(client.vservices.get_disk_name().get('dev_name'))
diff_disk = n_disks - disks
vol_res.get('extra_properties')['status'] = 'in-use'
LOG.debug('Begin get info for volume,the vgw ip %s' % gw_ip)
sys_dev_name = list(diff_disk)[0] if len(diff_disk) >= 1 else None
LOG.debug("dev_name = %s", sys_dev_name)
# device_name = attach_resp._info.get('device')
# sys_dev_name = client.vservices.get_disk_name(volume_id).get(
# 'dev_name')
# sys_dev_name = device_name
vol_res.get('extra_properties')['sys_dev_name'] = sys_dev_name
guest_format = client.vservices.get_disk_format(sys_dev_name) \
.get('disk_format')
if guest_format:
vol_res.get('extra_properties')['guest_format'] = guest_format
mount_point = client.vservices.force_mount_disk(
sys_dev_name, "/opt/" + volume_id)
vol_res.get('extra_properties')['mount_point'] = mount_point.get(
'mount_disk')
def reset_resources(self, context, resources):
# self._reset_resources_state(context, resources)
self._handle_resources_after_clone(context, resources)
def _reset_resources_state(self, context, resources):
for key, value in resources.items():
try:
resource_type = value.get('type')
resource_id = value.get('extra_properties', {}).get('id')
if resource_type == 'OS::Nova::Server':
vm_state = value.get('extra_properties', {}) \
.get('vm_state')
self.compute_api.reset_state(context, resource_id,
vm_state)
elif resource_type == 'OS::Cinder::Volume':
volume_state = value.get('extra_properties', {}) \
.get('status')
self.volume_api.reset_state(context, resource_id,
volume_state)
elif resource_type == 'OS::Heat::Stack':
self._reset_resources_state_for_stack(context, value)
except Exception as e:
LOG.warn(_LW('Reset resource state error, Error=%(e)s'),
{'e': e})
def _reset_resources_state_for_stack(self, context, stack_res):
template_str = stack_res.get('properties', {}).get('template')
template = json.loads(template_str)
def _reset_state(template):
temp_res = template.get('resources')
for key, value in temp_res.items():
res_type = value.get('type')
if res_type == 'OS::Cinder::Volume':
vid = value.get('extra_properties', {}).get('id')
v_state = value.get('extra_properties', {}).get('status')
if vid:
self.volume_api.reset_state(context, vid, v_state)
elif res_type == 'OS::Nova::Server':
sid = value.get('extra_properties', {}).get('id')
s_state = value.get('extra_properties', {}).get('vm_state')
if sid:
self.compute_api.reset_state(context, sid, s_state)
elif res_type and res_type.startswith('file://'):
son_template = value.get('content')
son_template = json.loads(son_template)
_reset_state(son_template)
_reset_state(template)
def handle_server_after_clone(self, context, resource, resources):
self._detach_server_temporary_port(context, resource)
extra_properties = resource.get('extra_properties', {})
vm_state = extra_properties.get('vm_state')
if vm_state == 'stopped':
self._handle_volume_for_svm_after_clone(context, resource,
resources)
def handle_stack_after_clone(self, context, resource, resources):
template_str = resource.get('properties', {}).get('template')
template = json.loads(template_str)
self._handle_volume_for_stack_after_clone(context, template)
def _handle_volume_for_stack_after_clone(self, context, template):
try:
resources = template.get('resources')
for key, res in resources.items():
res_type = res.get('type')
if res_type == 'OS::Cinder::Volume':
try:
copy_data = res.get('extra_properties', {}). \
get('copy_data')
if not copy_data:
continue
attachments = res.get('extra_properties', {}) \
.get('attachments')
volume_id = res.get('extra_properties', {}) \
.get('id')
vgw_id = res.get('extra_properties').get('gw_id')
self._detach_volume(context, vgw_id, volume_id)
if attachments:
for attachment in attachments:
server_id = attachment.get('server_id')
device = attachment.get('device')
self.compute_api.attach_volume(context,
server_id,
volume_id,
device)
except Exception as e:
                        LOG.error(_LE('Error from handling volume of stack '
                                      'after clone. Error=%(e)s'), {'e': e})
except Exception as e:
            LOG.warn('Failed to detach volume %s from vgw %s: '
                     'the volume was not attached to the vgw',
                     volume_id, vgw_id)
def _handle_volume_for_svm_after_clone(self, context,
server_resource, resources):
bdms = server_resource['properties'].get('block_device_mapping_v2', [])
vgw_id = server_resource.get('extra_properties', {}).get('gw_id')
for bdm in bdms:
volume_key = bdm.get('volume_id', {}).get('get_resource')
boot_index = bdm.get('boot_index')
device_name = bdm.get('device_name')
volume_res = resources.get(volume_key)
try:
if volume_res.get('extra_properties', {}).get('is_deacidized'):
volume_id = volume_res.get('extra_properties', {}) \
.get('id')
vgw_url = volume_res.get('extra_properties', {}) \
.get('gw_url')
sys_clone = volume_res.get('extra_properties', {}) \
.get('sys_clone')
copy_data = volume_res.get('extra_properties', {}). \
get('copy_data')
if (boot_index in ['0', 0] and not sys_clone) or \
not copy_data:
continue
vgw_ip = vgw_url.split(':')[0]
client = birdiegatewayclient.get_birdiegateway_client(
vgw_ip, str(CONF.v2vgateway_api_listen_port))
if boot_index not in ['0', 0] or sys_clone:
client.vservices._force_umount_disk(
"/opt/" + volume_id)
                # if the provider cloud cannot detach a volume in active status
if not CONF.is_active_detach_volume:
                    resource_common = common.ResourceCommon()
self.compute_api.stop_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'SHUTOFF')
self.compute_api.detach_volume(context, vgw_id,
volume_id)
self._wait_for_volume_status(context, volume_id,
vgw_id, 'available')
server_id = server_resource.get('extra_properties', {}) \
.get('id')
self.compute_api.attach_volume(context, server_id,
volume_id,
device_name)
self._wait_for_volume_status(context, volume_id,
server_id, 'in-use')
if not CONF.is_active_detach_volume:
self.compute_api.start_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'ACTIVE')
except Exception as e:
                LOG.error(_LE('Error from handling volume of vm '
                              'after clone. Error=%(e)s'), {'e': e})
def _detach_server_temporary_port(self, context, server_res):
# Read template file of this plan
server_id = server_res.get('extra_properties', {}).get('id')
migrate_port = server_res.get('extra_properties', {}) \
.get('migrate_port_id')
if server_res.get('extra_properties', {}).get('is_deacidized'):
if not server_id or not migrate_port:
return
try:
self.compute_api.migrate_interface_detach(context,
server_id,
migrate_port)
LOG.debug("Detach migrate port of server <%s> succeed.",
server_id)
except Exception as e:
LOG.error("Fail to detach migrate port of server <%s>. %s",
server_id, unicode(e))
| Hybrid-Cloud/conveyor | conveyor/clone/drivers/openstack/driver.py | Python | apache-2.0 | 31,506 |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset utilities."""
import functools
import pathlib
from typing import Dict, Tuple
from absl import logging
from graph_nets import graphs as tf_graphs
from graph_nets import utils_tf
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
import tqdm
# pylint: disable=g-bad-import-order
import sub_sampler
Path = pathlib.Path
NUM_PAPERS = 121751666
NUM_AUTHORS = 122383112
NUM_INSTITUTIONS = 25721
EMBEDDING_SIZE = 768
NUM_CLASSES = 153
NUM_NODES = NUM_PAPERS + NUM_AUTHORS + NUM_INSTITUTIONS
NUM_EDGES = 1_728_364_232
assert NUM_NODES == 244_160_499
NUM_K_FOLD_SPLITS = 10
OFFSETS = {
"paper": 0,
"author": NUM_PAPERS,
"institution": NUM_PAPERS + NUM_AUTHORS,
}
SIZES = {
"paper": NUM_PAPERS,
"author": NUM_AUTHORS,
"institution": NUM_INSTITUTIONS
}
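# The three node types share a single global index space: a node's global id
# is its per-type index plus its type offset. Illustrative helper (not part
# of the original API):
#
#   def global_node_id(node_type, local_index):
#       return OFFSETS[node_type] + local_index
#
#   global_node_id("author", 5)  # == NUM_PAPERS + 5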
RAW_DIR = Path("raw")
PREPROCESSED_DIR = Path("preprocessed")
RAW_NODE_FEATURES_FILENAME = RAW_DIR / "node_feat.npy"
RAW_NODE_LABELS_FILENAME = RAW_DIR / "node_label.npy"
RAW_NODE_YEAR_FILENAME = RAW_DIR / "node_year.npy"
TRAIN_INDEX_FILENAME = RAW_DIR / "train_idx.npy"
VALID_INDEX_FILENAME = RAW_DIR / "valid_idx.npy"
TEST_INDEX_FILENAME = RAW_DIR / "test_idx.npy"
EDGES_PAPER_PAPER_B = PREPROCESSED_DIR / "paper_paper_b.npz"
EDGES_PAPER_PAPER_B_T = PREPROCESSED_DIR / "paper_paper_b_t.npz"
EDGES_AUTHOR_INSTITUTION = PREPROCESSED_DIR / "author_institution.npz"
EDGES_INSTITUTION_AUTHOR = PREPROCESSED_DIR / "institution_author.npz"
EDGES_AUTHOR_PAPER = PREPROCESSED_DIR / "author_paper.npz"
EDGES_PAPER_AUTHOR = PREPROCESSED_DIR / "paper_author.npz"
PCA_PAPER_FEATURES_FILENAME = PREPROCESSED_DIR / "paper_feat_pca_129.npy"
PCA_AUTHOR_FEATURES_FILENAME = (
PREPROCESSED_DIR / "author_feat_from_paper_feat_pca_129.npy")
PCA_INSTITUTION_FEATURES_FILENAME = (
PREPROCESSED_DIR / "institution_feat_from_paper_feat_pca_129.npy")
PCA_MERGED_FEATURES_FILENAME = (
PREPROCESSED_DIR / "merged_feat_from_paper_feat_pca_129.npy")
NEIGHBOR_INDICES_FILENAME = PREPROCESSED_DIR / "neighbor_indices.npy"
NEIGHBOR_DISTANCES_FILENAME = PREPROCESSED_DIR / "neighbor_distances.npy"
FUSED_NODE_LABELS_FILENAME = PREPROCESSED_DIR / "fused_node_labels.npy"
FUSED_PAPER_EDGES_FILENAME = PREPROCESSED_DIR / "fused_paper_edges.npz"
FUSED_PAPER_EDGES_T_FILENAME = PREPROCESSED_DIR / "fused_paper_edges_t.npz"
K_FOLD_SPLITS_DIR = Path("k_fold_splits")
def get_raw_directory(data_root):
return Path(data_root) / "raw"
def get_preprocessed_directory(data_root):
return Path(data_root) / "preprocessed"
def _log_path_decorator(fn):
def _decorated_fn(path, **kwargs):
logging.info("Loading %s", path)
output = fn(path, **kwargs)
logging.info("Finish loading %s", path)
return output
return _decorated_fn
@_log_path_decorator
def load_csr(path, debug=False):
if debug:
# Dummy matrix for debugging.
return sp.csr_matrix(np.zeros([10, 10]))
return sp.load_npz(str(path))
@_log_path_decorator
def load_npy(path):
return np.load(str(path))
@functools.lru_cache()
def get_arrays(data_root="/data/",
use_fused_node_labels=True,
use_fused_node_adjacencies=True,
return_pca_embeddings=True,
k_fold_split_id=None,
return_adjacencies=True,
use_dummy_adjacencies=False):
"""Returns all arrays needed for training."""
logging.info("Starting to get files")
data_root = Path(data_root)
array_dict = {}
array_dict["paper_year"] = load_npy(data_root / RAW_NODE_YEAR_FILENAME)
if k_fold_split_id is None:
train_indices = load_npy(data_root / TRAIN_INDEX_FILENAME)
valid_indices = load_npy(data_root / VALID_INDEX_FILENAME)
else:
train_indices, valid_indices = get_train_and_valid_idx_for_split(
k_fold_split_id, num_splits=NUM_K_FOLD_SPLITS,
root_path=data_root / K_FOLD_SPLITS_DIR)
array_dict["train_indices"] = train_indices
array_dict["valid_indices"] = valid_indices
array_dict["test_indices"] = load_npy(data_root / TEST_INDEX_FILENAME)
if use_fused_node_labels:
array_dict["paper_label"] = load_npy(data_root / FUSED_NODE_LABELS_FILENAME)
else:
array_dict["paper_label"] = load_npy(data_root / RAW_NODE_LABELS_FILENAME)
if return_adjacencies:
logging.info("Starting to get adjacencies.")
if use_fused_node_adjacencies:
paper_paper_index = load_csr(
data_root / FUSED_PAPER_EDGES_FILENAME, debug=use_dummy_adjacencies)
paper_paper_index_t = load_csr(
data_root / FUSED_PAPER_EDGES_T_FILENAME, debug=use_dummy_adjacencies)
else:
paper_paper_index = load_csr(
data_root / EDGES_PAPER_PAPER_B, debug=use_dummy_adjacencies)
paper_paper_index_t = load_csr(
data_root / EDGES_PAPER_PAPER_B_T, debug=use_dummy_adjacencies)
array_dict.update(
dict(
author_institution_index=load_csr(
data_root / EDGES_AUTHOR_INSTITUTION,
debug=use_dummy_adjacencies),
institution_author_index=load_csr(
data_root / EDGES_INSTITUTION_AUTHOR,
debug=use_dummy_adjacencies),
author_paper_index=load_csr(
data_root / EDGES_AUTHOR_PAPER, debug=use_dummy_adjacencies),
paper_author_index=load_csr(
data_root / EDGES_PAPER_AUTHOR, debug=use_dummy_adjacencies),
paper_paper_index=paper_paper_index,
paper_paper_index_t=paper_paper_index_t,
))
if return_pca_embeddings:
array_dict["bert_pca_129"] = np.load(
data_root / PCA_MERGED_FEATURES_FILENAME, mmap_mode="r")
assert array_dict["bert_pca_129"].shape == (NUM_NODES, 129)
logging.info("Finish getting files")
# pytype: disable=attribute-error
assert array_dict["paper_year"].shape[0] == NUM_PAPERS
assert array_dict["paper_label"].shape[0] == NUM_PAPERS
if return_adjacencies and not use_dummy_adjacencies:
array_dict = _fix_adjacency_shapes(array_dict)
assert array_dict["paper_author_index"].shape == (NUM_PAPERS, NUM_AUTHORS)
assert array_dict["author_paper_index"].shape == (NUM_AUTHORS, NUM_PAPERS)
assert array_dict["paper_paper_index"].shape == (NUM_PAPERS, NUM_PAPERS)
assert array_dict["paper_paper_index_t"].shape == (NUM_PAPERS, NUM_PAPERS)
assert array_dict["institution_author_index"].shape == (
NUM_INSTITUTIONS, NUM_AUTHORS)
assert array_dict["author_institution_index"].shape == (
NUM_AUTHORS, NUM_INSTITUTIONS)
# pytype: enable=attribute-error
return array_dict
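# Example (sketch): a quick local load that avoids the large on-disk arrays.
# The data_root path below is an assumed placeholder; `use_dummy_adjacencies`
# swaps every adjacency matrix for a tiny in-memory dummy.
#   arrays = get_arrays(data_root="/tmp/mag", return_pca_embeddings=False,
#                       use_dummy_adjacencies=True)
#   print(arrays["train_indices"].shape)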
def add_nodes_year(graph, paper_year):
nodes = graph.nodes.copy()
indices = nodes["index"]
year = paper_year[np.minimum(indices, paper_year.shape[0] - 1)].copy()
year[nodes["type"] != 0] = 1900
nodes["year"] = year
return graph._replace(nodes=nodes)
def add_nodes_label(graph, paper_label):
nodes = graph.nodes.copy()
indices = nodes["index"]
label = paper_label[np.minimum(indices, paper_label.shape[0] - 1)]
label[nodes["type"] != 0] = 0
nodes["label"] = label
return graph._replace(nodes=nodes)
def add_nodes_embedding_from_array(graph, array):
"""Adds embeddings from the sstable_service for the indices."""
nodes = graph.nodes.copy()
indices = nodes["index"]
embedding_indices = indices.copy()
embedding_indices[nodes["type"] == 1] += NUM_PAPERS
embedding_indices[nodes["type"] == 2] += NUM_PAPERS + NUM_AUTHORS
# Gather the embeddings for the indices.
nodes["features"] = array[embedding_indices]
return graph._replace(nodes=nodes)
def get_graph_subsampling_dataset(
prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data,
max_nodes, max_edges,
**subsampler_kwargs):
"""Returns tf_dataset for online sampling."""
def generator():
labeled_indices = arrays[f"{prefix}_indices"]
if ratio_unlabeled_data_to_labeled_data > 0:
num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data *
labeled_indices.shape[0])
unlabeled_indices = np.random.choice(
NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False)
root_node_indices = np.concatenate([labeled_indices, unlabeled_indices])
else:
root_node_indices = labeled_indices
if shuffle_indices:
root_node_indices = root_node_indices.copy()
np.random.shuffle(root_node_indices)
for index in root_node_indices:
graph = sub_sampler.subsample_graph(
index,
arrays["author_institution_index"],
arrays["institution_author_index"],
arrays["author_paper_index"],
arrays["paper_author_index"],
arrays["paper_paper_index"],
arrays["paper_paper_index_t"],
paper_years=arrays["paper_year"],
max_nodes=max_nodes,
max_edges=max_edges,
**subsampler_kwargs)
graph = add_nodes_label(graph, arrays["paper_label"])
graph = add_nodes_year(graph, arrays["paper_year"])
graph = tf_graphs.GraphsTuple(*graph)
yield graph
sample_graph = next(generator())
return tf.data.Dataset.from_generator(
generator,
output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))
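# Example (sketch): build a training stream of subsampled graphs. The size
# caps below are assumed values, not tuned settings.
#   train_ds = get_graph_subsampling_dataset(
#       "train", arrays, shuffle_indices=True,
#       ratio_unlabeled_data_to_labeled_data=0.0,
#       max_nodes=1024, max_edges=4096)
#   for graph in train_ds.take(1):
#     print(graph.n_node, graph.n_edge)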
def paper_features_to_author_features(
author_paper_index, paper_features):
"""Averages paper features to authors."""
assert paper_features.shape[0] == NUM_PAPERS
assert author_paper_index.shape[0] == NUM_AUTHORS
author_features = np.zeros(
[NUM_AUTHORS, paper_features.shape[1]], dtype=paper_features.dtype)
for author_i in range(NUM_AUTHORS):
paper_indices = author_paper_index[author_i].indices
author_features[author_i] = paper_features[paper_indices].mean(
axis=0, dtype=np.float32)
if author_i % 10000 == 0:
logging.info("%d/%d", author_i, NUM_AUTHORS)
return author_features
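# A vectorized equivalent of the loop above, sketched under the assumption
# that `author_paper_index` can be treated as a binary adjacency (the loop
# only looks at .indices, never .data):
#   degrees = np.asarray(author_paper_index.sum(axis=1)).clip(min=1)
#   author_features = author_paper_index.dot(paper_features) / degrees
# The explicit loop trades speed for a much smaller peak-memory footprint.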
def author_features_to_institution_features(
institution_author_index, author_features):
"""Averages author features to institutions."""
assert author_features.shape[0] == NUM_AUTHORS
assert institution_author_index.shape[0] == NUM_INSTITUTIONS
institution_features = np.zeros(
[NUM_INSTITUTIONS, author_features.shape[1]], dtype=author_features.dtype)
for institution_i in range(NUM_INSTITUTIONS):
author_indices = institution_author_index[institution_i].indices
institution_features[institution_i] = author_features[
author_indices].mean(axis=0, dtype=np.float32)
if institution_i % 10000 == 0:
logging.info("%d/%d", institution_i, NUM_INSTITUTIONS)
return institution_features
def generate_fused_paper_adjacency_matrix(neighbor_indices, neighbor_distances,
paper_paper_csr):
"""Generates fused adjacency matrix for identical nodes."""
# First construct set of identical node indices.
# NOTE: Since we take only top K=26 identical pairs for each node, this is not
# actually exhaustive. Also, if A and B are equal, and B and C are equal,
# this method would not necessarily detect A and C being equal.
# However, this should capture almost all cases.
logging.info("Generating fused paper adjacency matrix")
  eps = 0.0
  # A pair (i, j) is "identical" when neighbor j of node i is a different
  # node at distance <= eps; map neighbor slots back to paper indices.
  row_index = np.arange(neighbor_indices.shape[0])[:, None]
  mask = (neighbor_indices != row_index) & (neighbor_distances <= eps)
  rows, cols = np.nonzero(mask)
  identical_pairs = list(
      zip(rows.tolist(), neighbor_indices[rows, cols].tolist()))
del mask
# Have a csc version for fast column access.
paper_paper_csc = paper_paper_csr.tocsc()
# Construct new matrix as coo, starting off with original rows/cols.
paper_paper_coo = paper_paper_csr.tocoo()
new_rows = [paper_paper_coo.row]
new_cols = [paper_paper_coo.col]
for pair in tqdm.tqdm(identical_pairs):
# STEP ONE: First merge papers being cited by the pair.
# Add edges from second paper, to all papers cited by first paper.
cited_by_first = paper_paper_csr.getrow(pair[0]).nonzero()[1]
if cited_by_first.shape[0] > 0:
new_rows.append(pair[1] * np.ones_like(cited_by_first))
new_cols.append(cited_by_first)
# Add edges from first paper, to all papers cited by second paper.
cited_by_second = paper_paper_csr.getrow(pair[1]).nonzero()[1]
if cited_by_second.shape[0] > 0:
new_rows.append(pair[0] * np.ones_like(cited_by_second))
new_cols.append(cited_by_second)
# STEP TWO: Then merge papers that cite the pair.
# Add edges to second paper, from all papers citing the first paper.
citing_first = paper_paper_csc.getcol(pair[0]).nonzero()[0]
if citing_first.shape[0] > 0:
new_rows.append(citing_first)
new_cols.append(pair[1] * np.ones_like(citing_first))
# Add edges to first paper, from all papers citing the second paper.
citing_second = paper_paper_csc.getcol(pair[1]).nonzero()[0]
if citing_second.shape[0] > 0:
new_rows.append(citing_second)
new_cols.append(pair[0] * np.ones_like(citing_second))
logging.info("Done with adjacency loop")
paper_paper_coo_shape = paper_paper_coo.shape
del paper_paper_csr
del paper_paper_csc
del paper_paper_coo
# All done; now concatenate everything together and form new matrix.
new_rows = np.concatenate(new_rows)
new_cols = np.concatenate(new_cols)
  return sp.coo_matrix(
      (np.ones_like(new_rows, dtype=bool), (new_rows, new_cols)),
      shape=paper_paper_coo_shape).tocsr()
def generate_k_fold_splits(
train_idx, valid_idx, output_path, num_splits=NUM_K_FOLD_SPLITS):
"""Generates splits adding fractions of the validation split to training."""
output_path = Path(output_path)
np.random.seed(42)
valid_idx = np.random.permutation(valid_idx)
  # Split into `num_splits` (almost) equally sized arrays.
valid_idx_parts = np.array_split(valid_idx, num_splits)
for i in range(num_splits):
# Add all but the i'th subpart to training set.
new_train_idx = np.concatenate(
[train_idx, *valid_idx_parts[:i], *valid_idx_parts[i+1:]])
# i'th subpart is validation set.
new_valid_idx = valid_idx_parts[i]
train_path = output_path / f"train_idx_{i}_{num_splits}.npy"
valid_path = output_path / f"valid_idx_{i}_{num_splits}.npy"
np.save(train_path, new_train_idx)
np.save(valid_path, new_valid_idx)
logging.info("Saved: %s", train_path)
logging.info("Saved: %s", valid_path)
def get_train_and_valid_idx_for_split(
split_id: int,
num_splits: int,
root_path: str,
) -> Tuple[np.ndarray, np.ndarray]:
"""Returns train and valid indices for given split."""
new_train_idx = load_npy(f"{root_path}/train_idx_{split_id}_{num_splits}.npy")
new_valid_idx = load_npy(f"{root_path}/valid_idx_{split_id}_{num_splits}.npy")
return new_train_idx, new_valid_idx
def generate_fused_node_labels(neighbor_indices, neighbor_distances,
node_labels, train_indices, valid_indices,
test_indices):
"""Generates fused adjacency matrix for identical nodes."""
logging.info("Generating fused node labels")
valid_indices = set(valid_indices.tolist())
test_indices = set(test_indices.tolist())
valid_or_test_indices = valid_indices | test_indices
train_indices = train_indices[train_indices < neighbor_indices.shape[0]]
# Go through list of all pairs where one node is in training set, and
for i in tqdm.tqdm(train_indices):
for j in range(neighbor_indices.shape[1]):
other_index = neighbor_indices[i][j]
# if the other is not a validation or test node,
if other_index in valid_or_test_indices:
continue
# and they are identical,
if neighbor_distances[i][j] == 0:
# assign the label of the training node to the other node
node_labels[other_index] = node_labels[i]
return node_labels
def _pad_to_shape(
sparse_csr_matrix: sp.csr_matrix,
output_shape: Tuple[int, int]) -> sp.csr_matrix:
"""Pads a csr sparse matrix to the given shape."""
# We should not try to expand anything smaller.
assert np.all(sparse_csr_matrix.shape <= output_shape)
# Maybe it already has the right shape.
if sparse_csr_matrix.shape == output_shape:
return sparse_csr_matrix
  # Append as many indptr elements as we need to match the leading size.
  # This is achieved by just padding with copies of the last indptr element.
required_padding = output_shape[0] - sparse_csr_matrix.shape[0]
updated_indptr = np.concatenate(
[sparse_csr_matrix.indptr] +
[sparse_csr_matrix.indptr[-1:]] * required_padding,
axis=0)
# The change in trailing size does not have structural implications, it just
# determines the highest possible value for the indices, so it is sufficient
# to just pass the new output shape, with the correct trailing size.
  return sp.csr_matrix(
      (sparse_csr_matrix.data,
       sparse_csr_matrix.indices,
       updated_indptr),
      shape=output_shape)
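# Worked example: padding a 2x3 csr matrix to (4, 5) appends two empty rows
# (duplicated indptr tail) and simply raises the column bound to 5.
#   m = sp.csr_matrix(np.arange(6).reshape(2, 3))
#   _pad_to_shape(m, (4, 5)).shape  # -> (4, 5)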
def _fix_adjacency_shapes(
    arrays: Dict[str, sp.csr_matrix],
) -> Dict[str, sp.csr_matrix]:
"""Fixes the shapes of the adjacency matrices."""
arrays = arrays.copy()
for key in ["author_institution_index",
"author_paper_index",
"paper_paper_index",
"institution_author_index",
"paper_author_index",
"paper_paper_index_t"]:
type_sender = key.split("_")[0]
type_receiver = key.split("_")[1]
arrays[key] = _pad_to_shape(
arrays[key], output_shape=(SIZES[type_sender], SIZES[type_receiver]))
return arrays
| deepmind/deepmind-research | ogb_lsc/mag/data_utils.py | Python | apache-2.0 | 18,032 |
"""
Simple demo of a scatter plot.
"""
import numpy as np
import matplotlib.pyplot as plt
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
| ArmstrongYang/StudyShare | Python-matplotlib/scatter_demo.py | Python | apache-2.0 | 295 |
"""REST API for DP.LA Service Hub BIBCAT Aggregator Feed"""
__author__ = "Jeremy Nelson, Mike Stabile"
import click
import datetime
import json
import math
import os
import pkg_resources
import xml.etree.ElementTree as etree
import requests
import rdflib
import urllib.parse
import reports
import bibcat.rml.processor as processor
from zipfile import ZipFile, ZIP_DEFLATED
from elasticsearch_dsl import Search, Q
from flask import abort, Flask, jsonify, request, render_template, Response
from flask import flash, send_file, url_for
#from flask_cache import Cache
from resync import CapabilityList, ResourceDump, ResourceDumpManifest
from resync import ResourceList
from resync.resource import Resource
from resync.resource_list import ResourceListDupeError
from resync.dump import Dump
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile('config.py')
from rdfframework.rml import RmlManager
from rdfframework.configuration import RdfConfigManager
from rdfframework.datamanager import DefinitionManager
from rdfframework.datatypes import RdfNsManager
RmlManager().register_defs([('package_all', 'bibcat.maps')])
# Define vocabulary and definition file locations
DefinitionManager().add_file_locations([('vocabularies', ['rdf',
'rdfs',
'owl',
'schema',
'bf',
'skos',
'dcterm']),
('package_all',
'bibcat.rdfw-definitions')])
# Register RDF namespaces to use
RdfNsManager({'acl': '<http://www.w3.org/ns/auth/acl#>',
'bd': '<http://www.bigdata.com/rdf#>',
'bf': 'http://id.loc.gov/ontologies/bibframe/',
'dbo': 'http://dbpedia.org/ontology/',
'dbp': 'http://dbpedia.org/property/',
'dbr': 'http://dbpedia.org/resource/',
'dc': 'http://purl.org/dc/elements/1.1/',
'dcterm': 'http://purl.org/dc/terms/',
'dpla': 'http://dp.la/about/map/',
'edm': 'http://www.europeana.eu/schemas/edm/',
'es': 'http://knowledgelinks.io/ns/elasticsearch/',
'foaf': 'http://xmlns.com/foaf/0.1/',
'loc': 'http://id.loc.gov/authorities/',
'm21': '<http://knowledgelinks.io/ns/marc21/>',
'mads': '<http://www.loc.gov/mads/rdf/v1#>',
'mods': 'http://www.loc.gov/mods/v3#',
'ore': 'http://www.openarchives.org/ore/terms/',
'owl': 'http://www.w3.org/2002/07/owl#',
'relators': 'http://id.loc.gov/vocabulary/relators/',
'schema': 'http://schema.org/',
'skos': 'http://www.w3.org/2004/02/skos/core#',
'xsd': 'http://www.w3.org/2001/XMLSchema#'})
CONFIG_MANAGER = RdfConfigManager(app.config, verify=False)
CONNECTIONS = CONFIG_MANAGER.conns
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
W3C_DATE = "%Y-%m-%dT%H:%M:%SZ"
__version__ = "1.0.0"
#cache = Cache(app, config={"CACHE_TYPE": "filesystem",
# "CACHE_DIR": os.path.join(PROJECT_BASE, "cache")})
def __run_query__(query):
"""Helper function returns results from sparql query"""
result = requests.post(app.config.get("TRIPLESTORE_URL"),
data={"query": query,
"format": "json"})
if result.status_code < 400:
return result.json().get('results').get('bindings')
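# Example (sketch): count all triples in the store.
#   bindings = __run_query__("SELECT (COUNT(*) AS ?count) WHERE { ?s ?p ?o }")
#   if bindings:
#       print(bindings[0]["count"]["value"])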
def __get_instances__(offset=0):
"""Helper function used by siteindex and resourcedump views
Args:
offset(int): offset number of records
"""
offset = int(offset)*50000
sparql = """
SELECT DISTINCT ?instance ?date
WHERE {{
?instance rdf:type bf:Instance .
OPTIONAL {{
?instance bf:generationProcess ?process .
?process bf:generationDate ?date .
}}
}} ORDER BY ?instance
LIMIT 50000
OFFSET {0}""".format(offset)
instances = CONNECTIONS.datastore.query(sparql)
return instances
def __get_mod_date__(entity_iri=None):
if "MOD_DATE" in app.config:
return app.config.get("MOD_DATE")
return datetime.datetime.utcnow().strftime(W3C_DATE)
def __generate_profile__(instance_uri):
search = Search(using=CONNECTIONS.search.es).query(
Q("term", uri="{}#Work".format(instance_uri))).source(
["bf_hasInstance.bf_hasItem.rml_map.map4_json_ld"])
result = search.execute()
if len(result.hits.hits) < 1:
#abort(404)
#click.echo("{}#Work not found".format(instance_uri))
return
if len(result.hits.hits[0]["_source"]) < 1:
#abort(404)
#click.echo("{}#Work missing _source".format(instance_uri))
return
raw_map4 = result.hits.hits[0]["_source"]["bf_hasInstance"][0]\
["bf_hasItem"][0]["rml_map"]["map4_json_ld"]
return raw_map4
def __generate_resource_dump__():
r_dump = ResourceDump()
r_dump.ln.append({"rel": "resourcesync",
"href": url_for('capability_list')})
bindings = CONNECTIONS.datastore.query("""
SELECT (count(?s) as ?count) WHERE {
?s rdf:type bf:Instance .
?item bf:itemOf ?s .
}""")
count = int(bindings[0].get('count').get('value'))
shards = math.ceil(count/50000)
for i in range(0, shards):
zip_info = __generate_zip_file__(i)
try:
zip_modified = datetime.datetime.fromtimestamp(zip_info.get('date'))
last_mod = zip_modified.strftime("%Y-%m-%d")
except TypeError:
last_mod = zip_info.get('date')[0:10]
click.echo("Total errors {:,}".format(len(zip_info.get('errors'))))
r_dump.add(
Resource(url_for('resource_zip',
count=i*50000),
lastmod=last_mod,
mime_type="application/zip",
length=zip_info.get("size")
)
)
return r_dump
def __generate_zip_file__(offset=0):
start = datetime.datetime.utcnow()
click.echo("Started at {}".format(start.ctime()))
manifest = ResourceDumpManifest()
manifest.modified = datetime.datetime.utcnow().isoformat()
manifest.ln.append({"rel": "resourcesync",
"href": url_for('capability_list')})
file_name = "{}-{:03}.zip".format(
datetime.datetime.utcnow().toordinal(),
offset)
tmp_location = os.path.join(app.config.get("DIRECTORIES")[0].get("path"),
"dump/{}".format(file_name))
if os.path.exists(tmp_location) is True:
return {"date": os.path.getmtime(tmp_location),
"size": os.path.getsize(tmp_location)}
dump_zip = ZipFile(tmp_location,
mode="w",
compression=ZIP_DEFLATED,
allowZip64=True)
instances = __get_instances__(offset)
errors = []
click.echo("Iterating through {:,} instances".format(len(instances)))
for i,row in enumerate(instances):
instance_iri = row.get("instance").get('value')
key = instance_iri.split("/")[-1]
if not "date" in row:
last_mod = __get_mod_date__()
else:
last_mod = "{}".format(row.get("date").get("value")[0:10])
path = "resources/{}.json".format(key)
if not i%25 and i > 0:
click.echo(".", nl=False)
if not i%100:
click.echo("{:,}".format(i), nl=False)
raw_json = __generate_profile__(instance_iri)
if raw_json is None:
errors.append(instance_iri)
continue
elif len(raw_json) < 1:
click.echo(instance_iri, nl=False)
break
dump_zip.writestr(path,
raw_json)
manifest.add(
Resource(instance_iri,
lastmod=last_mod,
length="{}".format(len(raw_json)),
path=path))
dump_zip.writestr("manifest.xml", manifest.as_xml())
dump_zip.close()
end = datetime.datetime.utcnow()
zip_size = os.stat(tmp_location).st_size
click.echo("Finished at {}, total time {} min, size={}".format(
end.ctime(),
(end-start).seconds / 60.0,
i))
return {"date": datetime.datetime.utcnow().isoformat(),
"size": zip_size,
"errors": errors}
@app.template_filter("pretty_num")
def nice_number(raw_number):
if raw_number is None:
return ''
return "{:,}".format(int(raw_number))
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html", error=e), 404
@app.route("/")
def home():
"""Default page"""
result = CONNECTIONS.datastore.query(
"SELECT (COUNT(*) as ?count) WHERE {?s ?p ?o }")
count = result[0].get("count").get("value")
if int(count) < 1:
flash("Triplestore is empty, please load service hub RDF data")
return render_template("index.html",
version=__version__,
count="{:,}".format(int(count)))
@app.route("/reports/")
@app.route("/reports/<path:name>")
def reporting(name=None):
if name is None:
return render_template("reports/index.html")
report_output = reports.report_router(name)
if report_output is None:
abort(404)
return render_template(
"reports/{0}.html".format(name),
data=report_output)
@app.route("/<path:type_of>/<path:name>")
def authority_view(type_of, name=None):
"""Generates a RDF:Description view for Service Hub name,
topic, agent, and other types of BIBFRAME entities
Args:
type_of(str): Type of entity
name(str): slug of name, title, or other textual identifier
"""
if name is None:
        # Display browse view of authorities
return "Browse display for {}".format(type_of)
uri = "{0}{1}/{2}".format(app.config.get("BASE_URL"),
type_of,
name)
entity_sparql = PREFIX + """
SELECT DISTINCT ?label ?value
WHERE {{
<{entity}> rdf:type {type_of} .
OPTIONAL {{
<{entity}> rdfs:label ?label
}}
OPTIONAL {{
<{entity}> rdf:value ?value
}}
}}""".format(entity=uri,
type_of="bf:{}".format(type_of.title()))
entity_results = __run_query__(entity_sparql)
if len(entity_results) < 1:
abort(404)
entity_graph = rdflib.Graph()
iri = rdflib.URIRef(uri)
entity_graph.add((iri, rdflib.RDF.type, getattr(BF, type_of.title())))
for row in entity_results:
if 'label' in row:
literal = rdflib.Literal(row.get('label').get('value'),
datatype=row.get('label').get('datatype'))
entity_graph.add((iri, rdflib.RDFS.label, literal))
if 'value' in row:
literal = rdflib.Literal(row.get('value').get('value'),
datatype=row.get('value').get('datatype'))
entity_graph.add((iri, rdflib.RDF.value, literal))
MAPv4_context["bf"] = str(BF)
raw_entity = entity_graph.serialize(format='json-ld',
context=MAPv4_context)
return Response(raw_entity, mimetype="application/json")
@app.route("/capabilitylist.xml")
def capability_list():
cap_list = CapabilityList()
cap_list.modified = __get_mod_date__()
cap_list.ln.append({"href": url_for('capability_list'),
"rel": "describedby",
"type": "application/xml"})
cap_list.add(Resource(url_for('site_index'),
capability="resourcelist"))
cap_list.add(Resource(url_for('resource_dump'),
capability="resourcedump"))
return Response(cap_list.as_xml(),
mimetype="text/xml")
@app.route("/resourcedump.xml")
def resource_dump():
xml = __generate_resource_dump__()
return Response(xml.as_xml(),
"text/xml")
@app.route("/resourcedump-<int:count>.zip")
def resource_zip(count):
zip_location = os.path.join(app.config.get("DIRECTORIES")[0].get("path"),
"dump/resour{}".format(file_name))
zip_location = os.path.join(PROJECT_BASE,
"dump/{}.zip".format(count))
if not os.path.exists(zip_location):
abort(404)
return send_file(zip_location)
@app.route("/siteindex.xml")
#@cache.cached(timeout=86400) # Cached for 1 day
def site_index():
"""Generates siteindex XML, each sitemap has a maximum of 50k links
dynamically generates the necessary number of sitemaps in the
template"""
result = CONNECTIONS.datastore.query("""SELECT (count(?work) as ?count)
WHERE {
?work rdf:type bf:Work .
?instance bf:instanceOf ?work .
?item bf:itemOf ?instance . }""")
count = int(result[0].get('count').get('value'))
shards = math.ceil(count/50000)
    mod_date = app.config.get('MOD_DATE')
    if mod_date is None:
        mod_date = datetime.datetime.utcnow().strftime("%Y-%m-%d")
xml = render_template("siteindex.xml",
count=range(1, shards+1),
last_modified=mod_date)
return Response(xml, mimetype="text/xml")
@app.route("/sitemap<int:offset>.xml", methods=["GET"])
#@cache.cached(timeout=86400)
def sitemap(offset=0):
if offset > 0:
offset = offset - 1
instances = __get_instances__(offset)
resource_list = ResourceList()
dedups = 0
for i,row in enumerate(instances):
instance = row.get('instance')
if "date" in row:
last_mod = row.get("date").get("value")[0:10]
else:
last_mod = datetime.datetime.utcnow().strftime(
W3C_DATE)
try:
resource_list.add(
Resource("{}.json".format(instance.get("value")),
lastmod=last_mod)
)
except ResourceListDupeError:
dedups += 1
continue
xml = resource_list.as_xml()
return Response(xml, mimetype="text/xml")
@app.route("/<path:uid>.json")
def detail(uid=None):
"""Generates DPLA Map V4 JSON-LD"""
if uid.startswith('favicon'):
return ''
click.echo("UID is {}".format(uid))
if uid is None:
abort(404)
uri = app.config.get("BASE_URL") + uid
raw_map_4 = __generate_profile__(uri)
return Response(raw_map_4, mimetype="application/json")
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| KnowledgeLinks/dpla-service-hub | api.py | Python | apache-2.0 | 14,727 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the nova.db namespace. Call these
functions from nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
"""
from eventlet import tpool
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova.openstack.common.db import api as db_api
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
tpool_opts = [
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(tpool_opts, 'database')
CONF.import_opt('backend', 'nova.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
class NovaDBAPI(object):
"""Nova's DB API wrapper class.
This wraps the oslo DB API with an option to be able to use eventlet's
thread pooling. Since the CONF variable may not be loaded at the time
this class is instantiated, we must look at it on the first DB API call.
"""
def __init__(self):
self.__db_api = None
@property
def _db_api(self):
if not self.__db_api:
nova_db_api = db_api.DBAPI(CONF.database.backend,
backend_mapping=_BACKEND_MAPPING)
if CONF.database.use_tpool:
self.__db_api = tpool.Proxy(nova_db_api)
else:
self.__db_api = nova_db_api
return self.__db_api
def __getattr__(self, key):
return getattr(self._db_api, key)
IMPL = NovaDBAPI()
LOG = logging.getLogger(__name__)
# The maximum value a signed INT type may have
MAX_INT = 0x7FFFFFFF
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
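# Example (sketch, context/uuid names assumed): destroy an instance only if
# its state still matches what the caller last saw.
#   cons = constraint(vm_state=equal_any('active', 'stopped'),
#                     task_state=not_equal('migrating'))
#   instance_destroy(ctxt, instance_uuid, constraint=cons)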
###################
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id, with_compute_node=False):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id,
with_compute_node=with_compute_node)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_by_compute_host(context, host):
"""Get the service entry for a given compute host.
Returns the service entry joined with the compute_node entry.
"""
return IMPL.service_get_by_compute_host(context, host)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
"""Get a compute node by its id.
:param context: The security context
:param compute_id: ID of the compute node
:returns: Dictionary-like object containing properties of the compute node,
including its corresponding service
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_by_service_id(context, service_id):
"""Get a compute node by its associated service id.
:param context: The security context
:param service_id: ID of the associated service
:returns: Dictionary-like object containing properties of the compute node,
including its corresponding service and statistics
Raises ServiceNotFound if service with the given ID doesn't exist.
"""
return IMPL.compute_node_get_by_service_id(context, service_id)
def compute_node_get_all(context, no_date_fields=False):
"""Get all computeNodes.
:param context: The security context
:param no_date_fields: If set to True, excludes 'created_at', 'updated_at',
'deleted_at' and 'deleted' fields from the output,
thus significantly reducing its size.
Set to False by default
:returns: List of dictionaries each containing compute node properties,
including corresponding service
"""
return IMPL.compute_node_get_all(context, no_date_fields)
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get compute nodes by hypervisor hostname.
:param context: The security context
:param hypervisor_match: The hypervisor hostname
:returns: List of dictionary-like objects each containing compute node
properties, including corresponding service
"""
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
"""Create a compute node from the values dictionary.
:param context: The security context
:param values: Dictionary containing compute node properties
:returns: Dictionary-like object containing the properties of the created
node, including its corresponding service and statistics
"""
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values):
"""Set the given properties on a compute node and update it.
:param context: The security context
:param compute_id: ID of the compute node
:param values: Dictionary containing compute node properties to be updated
:returns: Dictionary-like object containing the properties of the updated
compute node, including its corresponding service and statistics
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_update(context, compute_id, values)
def compute_node_delete(context, compute_id):
"""Delete a compute node from the database.
:param context: The security context
:param compute_id: ID of the compute node
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_delete(context, compute_id)
def compute_node_statistics(context):
"""Get aggregate statistics over all compute nodes.
:param context: The security context
:returns: Dictionary containing compute node characteristics summed up
over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc.
"""
return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
"""Returns a list of floating ip pools."""
return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
"""Allocate free floating ip from specified pool and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, project_id, pool,
auto_assigned)
def floating_ip_bulk_create(context, ips):
"""Create a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_bulk_destroy(context, ips):
"""Destroy a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_destroy(context, ips)
def floating_ip_create(context, values):
"""Create a floating ip from the values dictionary."""
return IMPL.floating_ip_create(context, values)
def floating_ip_deallocate(context, address):
"""Deallocate a floating ip by address."""
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
"""Destroy the floating_ip or raise if it does not exist."""
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
    :returns: the fixed ip record joined to network record, or None
              if the floating ip was not associated with a fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address.
:returns: the fixed ip record joined to network record or None
if the ip was already associated to the fixed ip.
"""
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
"""Get all floating ips."""
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
"""Get all floating ips by host."""
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
"""Get all floating ips by project."""
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
"""Get a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
"""Set auto_assigned flag to floating ip."""
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
"""Get a list of all zones in our database, public and private."""
return IMPL.dnsdomain_list(context)
def dnsdomain_get_all(context):
"""Get a list of all dnsdomains in our database."""
return IMPL.dnsdomain_get_all(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
"""Associated a DNS domain with an availability zone."""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
"""Associated a DNS domain with a project id."""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
"""Purge associations for the specified DNS zone."""
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
"""Get the db record for the specified domain."""
return IMPL.dnsdomain_get(context, fqdomain)
####################
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
def migration_create(context, values):
"""Create a migration record."""
return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
"""Finds a migration by the id."""
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
"""Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute, use_slave=use_slave)
def migration_get_in_progress_by_host_and_node(context, host, node):
"""Finds all migrations for the given host + node that are not yet
confirmed or reverted.
"""
return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
def migration_get_all_by_filters(context, filters):
"""Finds all migrations in progress."""
return IMPL.migration_get_all_by_filters(context, filters)
####################
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
"""
return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
"""Find free ip in network and associate it to instance or host.
Raises if one is not available.
"""
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
"""Create a lot of fixed ips from the values dictionary."""
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
"""Disassociate a fixed ip from an instance by address."""
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
"""Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id, get_network=False):
"""Get fixed ip by id or raise if it does not exist.
If get_network is true, also return the associated network.
"""
return IMPL.fixed_ip_get(context, id, get_network)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address, columns_to_join=None):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def fixed_ip_get_by_address_detailed(context, address):
"""Get detailed fixed ip info by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address_detailed(context, address)
def fixed_ip_get_by_floating_address(context, floating_address):
"""Get a fixed ip by a floating address."""
return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_host(context, host):
"""Get fixed ips by compute host."""
return IMPL.fixed_ip_get_by_host(context, host)
def fixed_ip_get_by_network_host(context, network_uuid, host):
"""Get fixed ip for a host in a network."""
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table."""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table filtering on vif uuid."""
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id,
use_slave=use_slave)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
def instance_destroy(context, instance_uuid, constraint=None,
update_cells=True):
"""Destroy the instance or raise if it does not exist."""
rv = IMPL.instance_destroy(context, instance_uuid, constraint)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance destroy"))
return rv
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid,
columns_to_join, use_slave=use_slave)
def instance_get(context, instance_id, columns_to_join=None):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id,
columns_to_join=columns_to_join)
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
use_slave=use_slave)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_host(context, host,
columns_to_join=None, use_slave=False):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host,
columns_to_join,
use_slave=use_slave)
def instance_get_all_by_host_and_node(context, host, node):
"""Get all instances belonging to a node."""
return IMPL.instance_get_all_by_host_and_node(context, host, node)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
def instance_floating_address_get_all(context, instance_uuid):
"""Get all floating ip addresses of an instance."""
return IMPL.instance_floating_address_get_all(context, instance_uuid)
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_update(context, instance_uuid, values, update_cells=True):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update(context, instance_uuid, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
# FIXME(comstud): 'update_cells' is temporary as we transition to using
# objects. When everything is using Instance.save(), we can remove the
# argument and the RPC to nova-cells.
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance id or uuid
:param values: = dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=columns_to_join)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
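# Example (sketch, names assumed): clear task_state and log what it replaced.
#   old_ref, new_ref = instance_update_and_get_original(
#       ctxt, instance_uuid, {'task_state': None})
#   LOG.debug("task_state was %s", old_ref['task_state'])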
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
"""Disassociate the given security group from the given instance."""
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
####################
def instance_group_create(context, values, policies=None, metadata=None,
members=None):
"""Create a new group with metadata.
Each group will receive a unique uuid. This will be used for access to the
group.
"""
return IMPL.instance_group_create(context, values, policies, metadata,
members)
def instance_group_get(context, group_uuid):
"""Get a specific group by id."""
return IMPL.instance_group_get(context, group_uuid)
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group."""
return IMPL.instance_group_update(context, group_uuid, values)
def instance_group_delete(context, group_uuid):
"""Delete an group."""
return IMPL.instance_group_delete(context, group_uuid)
def instance_group_get_all(context):
"""Get all groups."""
return IMPL.instance_group_get_all(context)
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups for a specific project_id."""
return IMPL.instance_group_get_all_by_project_id(context, project_id)
def instance_group_metadata_add(context, group_uuid, metadata,
set_delete=False):
"""Add metadata to the group."""
return IMPL.instance_group_metadata_add(context, group_uuid, metadata,
set_delete)
def instance_group_metadata_delete(context, group_uuid, key):
"""Delete metadata from the group."""
return IMPL.instance_group_metadata_delete(context, group_uuid, key)
def instance_group_metadata_get(context, group_uuid):
"""Get the metadata from the group."""
return IMPL.instance_group_metadata_get(context, group_uuid)
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
"""Add members to the group."""
return IMPL.instance_group_members_add(context, group_uuid, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
"""Delete a specific member from the group."""
return IMPL.instance_group_member_delete(context, group_uuid, instance_id)
def instance_group_members_get(context, group_uuid):
"""Get the members from the group."""
return IMPL.instance_group_members_get(context, group_uuid)
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
"""Add policies to the group."""
return IMPL.instance_group_policies_add(context, group_uuid, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
"""Delete a specific policy from the group."""
return IMPL.instance_group_policy_delete(context, group_uuid, policy)
def instance_group_policies_get(context, group_uuid):
"""Get the policies from the group."""
return IMPL.instance_group_policies_get(context, group_uuid)
###################
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
return IMPL.instance_info_cache_update(context, instance_uuid, values)
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
def key_pair_create(context, values):
"""Create a key_pair from the values dictionary."""
return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
"""Destroy the key_pair or raise if it does not exist."""
return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
"""Get all key_pairs by user."""
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
"""Count number of key pairs for the given user ID."""
return IMPL.key_pair_count_by_user(context, user_id)
####################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id, network_id, force)
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
"""Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
constraints because the network already exists, no exception is raised.
"""
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_disassociate(context, network_id, disassociate_host=True,
disassociate_project=True):
"""Disassociate the network from project or host
Raises if it does not exist.
"""
return IMPL.network_disassociate(context, network_id, disassociate_host,
disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
"""Get a network or raise if it does not exist."""
return IMPL.network_get(context, network_id, project_only=project_only)
def network_get_all(context, project_only="allow_none"):
"""Return all defined networks."""
return IMPL.network_get_all(context, project_only)
def network_get_all_by_uuids(context, network_uuids,
project_only="allow_none"):
"""Return networks by ids."""
return IMPL.network_get_all_by_uuids(context, network_uuids,
project_only=project_only)
# pylint: disable=C0103
def network_in_use_on_host(context, network_id, host=None):
"""Indicates if a network is currently in use on host."""
return IMPL.network_in_use_on_host(context, network_id, host)
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist."""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
"""Set the given properties on a network and update it.
Raises NotFound if network does not exist.
"""
return IMPL.network_update(context, network_id, values)
###############
def quota_create(context, project_id, resource, limit, user_id=None):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit,
user_id=user_id)
def quota_get(context, project_id, resource, user_id=None):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource, user_id=user_id)
def quota_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all quotas associated with a given project and user."""
return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_get_all(context, project_id):
"""Retrieve all user quotas associated with a given project."""
return IMPL.quota_get_all(context, project_id)
def quota_update(context, project_id, resource, limit, user_id=None):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit,
user_id=user_id)
###################
def quota_usage_get(context, project_id, resource, user_id=None):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id)
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project_and_user(context,
project_id, user_id)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, user_id, resource,
**kwargs)
###################
def quota_reserve(context, resources, quotas, user_quotas, deltas, expire,
until_refresh, max_age, project_id=None, user_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age,
project_id=project_id, user_id=user_id)
def reservation_commit(context, reservations, project_id=None, user_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def reservation_rollback(context, reservations, project_id=None, user_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
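# Typical reserve/commit/rollback flow (sketch, surrounding names assumed):
#   rsvs = quota_reserve(ctxt, resources, quotas, user_quotas,
#                        {'instances': 1}, expire, 0, 0)
#   try:
#       ...  # do the work the reservation covers
#       reservation_commit(ctxt, rsvs)
#   except Exception:
#       reservation_rollback(ctxt, rsvs)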
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
"""Destroy all quotas associated with a given project and user."""
return IMPL.quota_destroy_all_by_project_and_user(context,
project_id, user_id)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def ec2_volume_create(context, volume_id, forced_id=None):
return IMPL.ec2_volume_create(context, volume_id, forced_id)
def ec2_volume_get_by_id(context, volume_id):
return IMPL.ec2_volume_get_by_id(context, volume_id)
def ec2_volume_get_by_uuid(context, volume_uuid):
return IMPL.ec2_volume_get_by_uuid(context, volume_uuid)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
def block_device_mapping_create(context, values, legacy=True):
"""Create an entry of block device mapping."""
return IMPL.block_device_mapping_create(context, values, legacy)
def block_device_mapping_update(context, bdm_id, values, legacy=True):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
def block_device_mapping_update_or_create(context, values, legacy=True):
"""Update an entry of block device mapping.
    If it does not exist, create a new entry.
"""
return IMPL.block_device_mapping_update_or_create(context, values, legacy)
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
"""Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid,
use_slave)
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
"""Get block device mapping for a given volume."""
return IMPL.block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
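# Hedged sketch (added for illustration): a create/destroy round trip. The
# keys in `values` are assumptions about what the backing IMPL accepts.
def _example_bdm_roundtrip(context, instance_uuid, volume_id):
    bdm = block_device_mapping_create(context, {
        'instance_uuid': instance_uuid,
        'volume_id': volume_id,
        'device_name': '/dev/vdb',
    }, legacy=False)
    block_device_mapping_destroy_by_instance_and_volume(
        context, instance_uuid, volume_id)
    return bdm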
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id, columns_to_join=None):
"""Get security group by its id."""
return IMPL.security_group_get(context, security_group_id,
columns_to_join)
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
"""Returns a security group with the specified name from a project."""
    return IMPL.security_group_get_by_name(context, project_id, group_name,
                                           columns_to_join=columns_to_join)
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_uuid):
"""Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_uuid)
def security_group_in_use(context, group_id):
"""Indicates if a security group is currently in use."""
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
"""Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_update(context, security_group_id, values,
columns_to_join=None):
"""Update a security group."""
return IMPL.security_group_update(context, security_group_id, values,
columns_to_join=columns_to_join)
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id.
Returns a tuple with the first element being a bool indicating
if the default security group previously existed. Second
element is the dict used to create the default security group.
"""
return IMPL.security_group_ensure_default(context)
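# Illustration only (not in the original file): consuming the tuple
# contract documented in the docstring above.
def _example_ensure_default_group(context):
    existed, group_values = security_group_ensure_default(context)
    if not existed:
        # The project's default security group was created by this call.
        pass
    return group_values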
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
####################
def security_group_rule_create(context, values):
"""Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
"""Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(
context, security_group_id, columns_to_join=columns_to_join)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
"""Get all rules that grant access to the given security group."""
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
"""Gets a security group rule."""
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
"""Count rules in a given security group."""
return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
def security_group_default_rule_get(context, security_group_rule_default_id):
return IMPL.security_group_default_rule_get(context,
security_group_rule_default_id)
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
return IMPL.security_group_default_rule_destroy(
context, security_group_rule_default_id)
def security_group_default_rule_create(context, values):
return IMPL.security_group_default_rule_create(context, values)
def security_group_default_rule_list(context):
return IMPL.security_group_default_rule_list(context)
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
###################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
console_type):
"""Fetch a console pool for a given proxy host, compute host, and type."""
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
"""Fetch all pools for given proxy host and type."""
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
"""Create a console."""
return IMPL.console_create(context, values)
def console_delete(context, console_id):
"""Delete a console."""
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_uuid,
columns_to_join)
def console_get(context, console_id, instance_uuid=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_uuid)
##################
def flavor_create(context, values, projects=None):
"""Create a new instance type."""
return IMPL.flavor_create(context, values, projects=projects)
def flavor_get_all(context, inactive=False, filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
"""Get all instance flavors."""
return IMPL.flavor_get_all(
context, inactive=inactive, filters=filters, sort_key=sort_key,
sort_dir=sort_dir, limit=limit, marker=marker)
def flavor_get(context, id):
"""Get instance type by id."""
return IMPL.flavor_get(context, id)
def flavor_get_by_name(context, name):
"""Get instance type by name."""
return IMPL.flavor_get_by_name(context, name)
def flavor_get_by_flavor_id(context, id, read_deleted=None):
"""Get instance type by flavor id."""
return IMPL.flavor_get_by_flavor_id(context, id, read_deleted)
def flavor_destroy(context, name):
"""Delete an instance type."""
return IMPL.flavor_destroy(context, name)
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access by flavor id."""
return IMPL.flavor_access_get_by_flavor_id(context, flavor_id)
def flavor_access_add(context, flavor_id, project_id):
"""Add flavor access for project."""
return IMPL.flavor_access_add(context, flavor_id, project_id)
def flavor_access_remove(context, flavor_id, project_id):
"""Remove flavor access for project."""
return IMPL.flavor_access_remove(context, flavor_id, project_id)
def flavor_extra_specs_get(context, flavor_id):
"""Get all extra specs for an instance type."""
return IMPL.flavor_extra_specs_get(context, flavor_id)
def flavor_extra_specs_get_item(context, flavor_id, key):
"""Get extra specs by key and flavor_id."""
return IMPL.flavor_extra_specs_get_item(context, flavor_id, key)
def flavor_extra_specs_delete(context, flavor_id, key):
"""Delete the given extra specs item."""
IMPL.flavor_extra_specs_delete(context, flavor_id, key)
def flavor_extra_specs_update_or_create(context, flavor_id,
extra_specs):
"""Create or update instance type extra specs.
This adds or modifies the key/value pairs specified in the
    extra specs dict argument.
"""
IMPL.flavor_extra_specs_update_or_create(context, flavor_id,
extra_specs)
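# Hedged sketch: writing and reading back extra specs for a flavor. The
# 'hw:cpu_policy' key is just an example value, not a requirement.
def _example_flavor_extra_specs(context, flavor_id):
    flavor_extra_specs_update_or_create(context, flavor_id,
                                        {'hw:cpu_policy': 'dedicated'})
    return flavor_extra_specs_get(context, flavor_id)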
####################
def pci_device_get_by_addr(context, node_id, dev_addr):
"""Get PCI device by address."""
return IMPL.pci_device_get_by_addr(context, node_id, dev_addr)
def pci_device_get_by_id(context, id):
"""Get PCI device by id."""
return IMPL.pci_device_get_by_id(context, id)
def pci_device_get_all_by_node(context, node_id):
"""Get all PCI devices for one host."""
return IMPL.pci_device_get_all_by_node(context, node_id)
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
"""Get PCI devices allocated to instance."""
return IMPL.pci_device_get_all_by_instance_uuid(context, instance_uuid)
def pci_device_destroy(context, node_id, address):
"""Delete a PCI device record."""
return IMPL.pci_device_destroy(context, node_id, address)
def pci_device_update(context, node_id, address, value):
"""Update a pci device."""
return IMPL.pci_device_update(context, node_id, address, value)
###################
def cell_create(context, values):
"""Create a new child Cell entry."""
return IMPL.cell_create(context, values)
def cell_update(context, cell_name, values):
"""Update a child Cell entry."""
return IMPL.cell_update(context, cell_name, values)
def cell_delete(context, cell_name):
"""Delete a child Cell."""
return IMPL.cell_delete(context, cell_name)
def cell_get(context, cell_name):
"""Get a specific child Cell."""
return IMPL.cell_get(context, cell_name)
def cell_get_all(context):
"""Get all child Cells."""
return IMPL.cell_get_all(context)
####################
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
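# Illustration (not part of the original API): with delete=True the passed
# metadata replaces whatever the instance had; with delete=False it is
# merged into the existing metadata.
def _example_replace_instance_metadata(context, instance_uuid):
    return instance_metadata_update(context, instance_uuid,
                                    {'owner': 'qa'}, delete=True)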
####################
def instance_system_metadata_get(context, instance_uuid):
"""Get all system metadata for an instance."""
return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
####################
def agent_build_create(context, values):
"""Create a new agent build entry."""
return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
"""Get agent build by hypervisor/OS/architecture triple."""
return IMPL.agent_build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context, hypervisor=None):
"""Get all agent builds."""
return IMPL.agent_build_get_all(context, hypervisor)
def agent_build_destroy(context, agent_update_id):
"""Destroy agent build entry."""
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
"""Return bw usage for instance and mac in a given audit period."""
    return IMPL.bw_usage_get(context, uuid, start_period, mac,
                             use_slave=use_slave)
def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None,
update_cells=True):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
if update_cells:
try:
cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed)
except Exception:
LOG.exception(_("Failed to notify cells of bw_usage update"))
return rv
###################
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return IMPL.vol_get_usage_by_time(context, begin)
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
"""Update cached volume usage for a volume
Creates new record if needed.
"""
return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
wr_bytes, instance_id, project_id, user_id,
availability_zone,
update_totals=update_totals)
###################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
return IMPL.s3_image_create(context, image_uuid)
####################
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
"""Get a specific aggregate by id."""
return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
"""Get a list of aggregates that host belongs to."""
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
"""Get metadata for all aggregates that host belongs to.
    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    Optional key filter.
"""
return IMPL.aggregate_metadata_get_by_host(context, host, key)
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
"""Get metadata for an aggregate by metadata key."""
return IMPL.aggregate_metadata_get_by_metadata_key(context, aggregate_id,
key)
def aggregate_host_get_by_metadata_key(context, key):
"""Get hosts with a specific metadata key metadata for all aggregates.
Returns a dictionary where each key is a hostname and each value is a set
of the key values
return value: {machine: set( az1, az2 )}
"""
return IMPL.aggregate_host_get_by_metadata_key(context, key)
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates.
If values contains a metadata key, it updates the aggregate metadata too.
"""
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
"""Delete an aggregate."""
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
"""Get all aggregates."""
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
"""Add/update metadata. If set_delete=True, it adds only."""
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
"""Get metadata for the specified aggregate."""
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
"""Delete the given metadata key."""
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
"""Add host to the aggregate."""
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
"""Get hosts for the specified aggregate."""
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
"""Delete the given host from the aggregate."""
IMPL.aggregate_host_delete(context, aggregate_id, host)
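# Hedged sketch: create an aggregate, attach a host and read host metadata
# back. 'availability_zone' is the conventional metadata key; dict-style
# access on the returned aggregate is an assumption of this example.
def _example_aggregate_setup(context, host):
    aggregate = aggregate_create(context, {'name': 'rack-1'},
                                 metadata={'availability_zone': 'az1'})
    aggregate_host_add(context, aggregate['id'], host)
    return aggregate_metadata_get_by_host(context, host,
                                          key='availability_zone')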
####################
def instance_fault_create(context, values):
"""Create a new Instance Fault."""
return IMPL.instance_fault_create(context, values)
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def action_start(context, values):
"""Start an action for an instance."""
return IMPL.action_start(context, values)
def action_finish(context, values):
"""Finish an action for an instance."""
return IMPL.action_finish(context, values)
def actions_get(context, uuid):
"""Get all instance actions for the provided instance."""
return IMPL.actions_get(context, uuid)
def action_get_by_request_id(context, uuid, request_id):
"""Get the action by request_id and given instance."""
return IMPL.action_get_by_request_id(context, uuid, request_id)
def action_event_start(context, values):
"""Start an event on an instance action."""
return IMPL.action_event_start(context, values)
def action_event_finish(context, values):
"""Finish an event on an instance action."""
return IMPL.action_event_finish(context, values)
def action_events_get(context, action_id):
"""Get the events by action id."""
return IMPL.action_events_get(context, action_id)
def action_event_get_by_id(context, action_id, event_id):
return IMPL.action_event_get_by_id(context, action_id, event_id)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table."""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
"""Get uuid through ec2 id from instance_id_mappings table."""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_uuid, id=None):
"""Create the ec2 id to instance uuid mapping on demand."""
return IMPL.ec2_instance_create(context, instance_uuid, id)
def ec2_instance_get_by_uuid(context, instance_uuid):
return IMPL.ec2_instance_get_by_uuid(context, instance_uuid)
def ec2_instance_get_by_id(context, instance_id):
return IMPL.ec2_instance_get_by_id(context, instance_id)
####################
def task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message=None):
"""Mark a task as complete for a given host/time period."""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None):
"""Mark a task as started for a given host/time period."""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message)
def task_log_get_all(context, task_name, period_beginning,
period_ending, host=None, state=None):
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state)
def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state)
####################
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to corresponding shadow
tables.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows(context, max_rows=max_rows)
def archive_deleted_rows_for_table(context, tablename, max_rows=None):
"""Move up to max_rows rows from tablename to corresponding shadow
table.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows)
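# Minimal sketch (not in the original file): drain soft-deleted rows in
# batches until none remain, relying on the documented "number of rows
# archived" return value.
def _example_archive_everything(context, batch_size=1000):
    while archive_deleted_rows(context, max_rows=batch_size):
        pass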
| tanglei528/nova | nova/db/api.py | Python | apache-2.0 | 65,070 |
#!/usr/bin/python3
import MySQLdb
import os
import re
db = MySQLdb.connect("etos39.cn.ao.ericsson.se","automation","automation","gerrit_data_new")
# db = MySQLdb.connect("localhost","root","root","work" )
cursor = db.cursor()
cursor.execute('SELECT reviewer_username FROM comments GROUP BY reviewer_username')
usersList = cursor.fetchall()
UM = {}
for users in usersList:
for user in users:
if user != None:
outPut = os.popen('/usr/bin/ldapsearch -x -LLL -D "uid=COVESEOS,ou=Users,ou=Internal,o=ericsson" -w 1qaz\@WSX -b "uid='+user+',ou=Users,ou=Internal,o=ericsson" -h ecd.ericsson.se -p 389|grep eriOperationalManager:|awk \'{print $2}\'','r')
if outPut != None:
                try:
                    param = (str(user), str(outPut.read()))
                    rule = re.compile(r'[^a-zA-Z]')
                    username = rule.sub('', str(user))
                    managername = rule.sub('', param[1])
                    print(username)
                    cursor.execute("""INSERT INTO person(username,manager) VALUES(%s,%s)""", (username, managername))
                    db.commit()
                except Exception as e:
                    print(e)
                    db.rollback()
| KiviMao/kivi | Script/Show-Comments-story/Get-All-User.py | Python | apache-2.0 | 1,268 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import json
import copy
import time
from collections import defaultdict, OrderedDict
import requests
import _jsonnet # pylint: disable=import-error
LINUX_PRICING_URLS = [
# Deprecated instances (JSON format)
'https://aws.amazon.com/ec2/pricing/json/linux-od.json',
# Previous generation instances (JavaScript file)
'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js',
# New generation instances (JavaScript file)
# Using other endpoint atm
# 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js'
]
EC2_REGIONS = [
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
'us-gov-west-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'eu-north-1',
'eu-central-1',
'ca-central-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'ap-northeast-2',
'ap-south-1',
'sa-east-1',
'cn-north-1',
]
EC2_INSTANCE_TYPES = [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'i3.large',
'i3.xlarge',
'i3.2xlarge',
'i3.4xlarge',
'i3.8xlarge',
    'i3.16xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
# Maps EC2 region name to region name used in the pricing file
REGION_NAME_MAP = {
'us-east': 'ec2_us_east',
'us-east-1': 'ec2_us_east',
'us-east-2': 'ec2_us_east_ohio',
'us-west': 'ec2_us_west',
'us-west-1': 'ec2_us_west',
'us-west-2': 'ec2_us_west_oregon',
'eu-west-1': 'ec2_eu_west',
'eu-west-2': 'ec2_eu_west_london',
'eu-west-3': 'ec2_eu_west_3',
'eu-ireland': 'ec2_eu_west',
'eu-central-1': 'ec2_eu_central',
'ca-central-1': 'ec2_ca_central_1',
'apac-sin': 'ec2_ap_southeast',
'ap-southeast-1': 'ec2_ap_southeast',
'apac-syd': 'ec2_ap_southeast_2',
'ap-southeast-2': 'ec2_ap_southeast_2',
'apac-tokyo': 'ec2_ap_northeast',
'ap-northeast-1': 'ec2_ap_northeast',
'ap-northeast-2': 'ec2_ap_northeast',
'ap-south-1': 'ec2_ap_south_1',
'sa-east-1': 'ec2_sa_east',
'us-gov-west-1': 'ec2_us_govwest',
'cn-north-1': 'ec2_cn_north',
}
INSTANCE_SIZES = [
'micro',
'small',
'medium',
'large',
'xlarge',
'x-large',
'extra-large'
]
RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))')
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def scrape_ec2_pricing():
result = defaultdict(OrderedDict)
os_map = {'linux': 'ec2_linux', 'windows-std': 'ec2_windows'}
for item in os_map.values():
result[item] = {}
for url in LINUX_PRICING_URLS:
response = requests.get(url)
if re.match(r'.*?\.json$', url):
data = response.json()
print("Sample response: %s..." % (str(data)[:100]))
elif re.match(r'.*?\.js$', url):
data = response.content.decode('utf-8')
print("Sample response: %s..." % (data[:100]))
match = re.match(r'^.*callback\((.*?)\);?$', data,
re.MULTILINE | re.DOTALL)
data = match.group(1)
# NOTE: We used to use demjson, but it's not working under Python 3 and new version of
# setuptools anymore so we use jsonnet
# demjson supports non-strict mode and can parse unquoted objects
data = json.loads(_jsonnet.evaluate_snippet('snippet', data))
regions = data['config']['regions']
for region_data in regions:
region_name = region_data['region']
instance_types = region_data['instanceTypes']
for instance_type in instance_types:
sizes = instance_type['sizes']
for size in sizes:
if not result['ec2_linux'].get(size['size'], False):
result['ec2_linux'][size['size']] = {}
price = size['valueColumns'][0]['prices']['USD']
if str(price).lower() == 'n/a':
# Price not available
continue
result['ec2_linux'][size['size']][
region_name] = float(price)
res = defaultdict(OrderedDict)
url = ('https://calculator.aws/pricing/1.0/'
'ec2/region/{}/ondemand/{}/index.json')
instances = set()
for OS in ['linux', 'windows-std']:
res[os_map[OS]] = {}
for region in EC2_REGIONS:
res[os_map[OS]][region] = {}
full_url = url.format(region, OS)
response = requests.get(full_url)
if response.status_code != 200:
print("Skipping URL %s which returned non 200-status code (%s)" %
(full_url, response.status_code))
continue
data = response.json()
for entry in data['prices']:
instance_type = entry['attributes'].get(
'aws:ec2:instanceType', "")
instances.add(instance_type)
price = entry['price'].get('USD', 0)
res[os_map[OS]][region][instance_type] = price
for item in os_map.values():
for instance in instances:
if not result[item].get(instance, False):
result[item][instance] = {}
for region in EC2_REGIONS:
if res[item][region].get(instance, False):
result[item][instance][region] = float(res[
item][region][instance])
return result
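# Hedged helper (not in the original script): the structure built above is
# result[os_key][instance_type][region] -> price in USD; the keys below are
# example values only.
def _example_price_lookup(pricing):
    return pricing.get('ec2_linux', {}).get('m3.medium', {}).get('us-east-1')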
def update_pricing_file(pricing_file_path, pricing_data):
with open(pricing_file_path, 'r') as fp:
content = fp.read()
data = json.loads(content)
original_data = copy.deepcopy(data)
data['compute'].update(pricing_data)
if data == original_data:
# Nothing has changed, bail out early and don't update "updated" attribute
print("Nothing has changed, skipping update.")
return
data['updated'] = int(time.time())
# Always sort the pricing info
data = sort_nested_dict(data)
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(pricing_file_path, 'w') as fp:
fp.write(content)
def sort_nested_dict(value):
"""
Recursively sort a nested dict.
"""
result = OrderedDict()
for key, value in sorted(value.items(), key=sort_key_by_numeric_other):
if isinstance(value, (dict, OrderedDict)):
result[key] = sort_nested_dict(value)
else:
result[key] = value
return result
def sort_key_by_numeric_other(key_value):
"""
Split key into numeric, alpha and other part and sort accordingly.
"""
result = []
for (numeric, alpha, other) in RE_NUMERIC_OTHER.findall(key_value[0]):
numeric = int(numeric) if numeric else -1
alpha = INSTANCE_SIZES.index(alpha) if alpha in INSTANCE_SIZES else alpha
alpha = str(alpha)
item = tuple([numeric, alpha, other])
result.append(item)
return tuple(result)
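# Illustration only: with the key above, sizes compare by their index in
# INSTANCE_SIZES rather than alphabetically, so 'small' sorts before
# 'medium', which sorts before 'xlarge' within the same family.
def _example_sorted_sizes():
    names = ['m1.xlarge', 'm1.small', 'm1.medium']
    return sorted(names, key=lambda name: sort_key_by_numeric_other((name,)))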
def main():
print('Scraping EC2 pricing data (this may take up to 2 minutes)....')
pricing_data = scrape_ec2_pricing()
update_pricing_file(pricing_file_path=PRICING_FILE_PATH,
pricing_data=pricing_data)
print('Pricing data updated')
if __name__ == '__main__':
main()
| Kami/libcloud | contrib/scrape-ec2-prices.py | Python | apache-2.0 | 9,128 |
import logging
import pika
import sys
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
from .. import rabbitutils
import esgfpid.defaults as defaults
from esgfpid.utils import loginfo, logdebug, logtrace, logerror, logwarn, log_every_x_times
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
'''
The RabbitFeeder is responsible for publishing messages to RabbitMQ.
It is very simple. Basically the only method it exposes
(except for some simple getter/setter which is rarely ever used)
is publish_message(), which is called from the main thread.
'''
class RabbitFeeder(object):
def __init__(self, thread, statemachine, nodemanager):
self.thread = thread
'''
Read-only.
Before publishing a message, we check the state, and we log
the state. '''
self.statemachine = statemachine
self.nodemanager = nodemanager
'''
The deliver_number is important. It defines the number of the message
that is used to identify it between this client and the RabbitMQ
server (e.g. so the correct messages are deleted upon confirmation).
It makes sure that rabbit server and this client talk about the
same message.
NEVER EVER INCREMENT OR OTHERWISE MODIFY THIS!
From the RabbitMQ docs:
"The delivery tag is valid only within the channel from which
the message was received. I.e. a client MUST NOT receive a
message on one channel and then acknowledge it on another."
Source: https://www.rabbitmq.com/amqp-0-9-1-reference.html '''
self.__delivery_number = 1
# Logging
self.__first_publication_trigger = True
self.__logcounter_success = 0 # counts successful publishes!
self.__logcounter_trigger = 0 # counts triggers!
self.__LOGFREQUENCY = 10
self.__have_not_warned_about_connection_fail_yet = True
self.__have_not_warned_about_force_close_yet = True
'''
Triggers the publication of one message to RabbitMQ, if the
state machine currently allows this.
The message is fetched from the Queue of unpublished messages.
So far, whenever the library wants to publish messages, it
fires as many of these "publish_message" events as messages
were published (and some extra, to be sure).
If some of these triggers cannot be acted upon, as the module
is not in a state where it is allowed to publish, the triggers
should be fired as soon as the module is in available state
again.
# TODO: Find better way to make sure enough publish events are fired.
Are we sure there is not ever a way to have some messages
in the unpublished Queue that could be sent, but aren't, because
no event was fired for them? For example, if an exception occurs
during publish, and the message was put back - will there ever
be an event to trigger its publication? I don't think so.
Interim solution (hack):
(a) At the moment, for every message that the publisher hands
over, I fire two events (rabbitthread).
(b) During the close-down algorithm, if there is unpublished
messages, I fire publish events, to make sure they are
published (thread_shutter).
'''
def publish_message(self):
try:
return self.__publish_message()
except Exception as e:
logwarn(LOGGER, 'Error in feeder.publish_message(): %s: %s', e.__class__.__name__, repr(e))
raise e
def __publish_message(self):
self.__logcounter_trigger += 1
if self.statemachine.is_NOT_STARTED_YET() or self.statemachine.is_WAITING_TO_BE_AVAILABLE():
log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received early trigger for feeding the rabbit (trigger %i).', self.__logcounter_trigger)
self.__log_why_cannot_feed_the_rabbit_now()
elif self.statemachine.is_AVAILABLE() or self.statemachine.is_AVAILABLE_BUT_WANTS_TO_STOP():
log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received trigger for publishing message to RabbitMQ (trigger %i).', self.__logcounter_trigger)
self.__log_publication_trigger()
self.__publish_message_to_channel()
elif self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received late trigger for feeding the rabbit (trigger %i).', self.__logcounter_trigger)
self.__log_why_cannot_feed_the_rabbit_now()
''' This method only logs. '''
def __log_publication_trigger(self):
if self.__first_publication_trigger:
logdebug(LOGGER, 'Received first trigger for publishing message to RabbitMQ.')
self.__first_publication_trigger = False
logtrace(LOGGER, 'Received trigger for publishing message to RabbitMQ, and module is ready to accept it.')
''' This method only logs, depending on the state machine's state.'''
def __log_why_cannot_feed_the_rabbit_now(self):
log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Cannot publish message to RabbitMQ (trigger no. %i).', self.__logcounter_trigger)
if self.statemachine.is_WAITING_TO_BE_AVAILABLE():
logdebug(LOGGER, 'Cannot publish message to RabbitMQ yet, as the connection is not ready.')
elif self.statemachine.is_NOT_STARTED_YET():
logerror(LOGGER, 'Cannot publish message to RabbitMQ, as the thread is not running yet.')
elif self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
if self.statemachine.detail_could_not_connect:
logtrace(LOGGER, 'Could not publish message to RabbitMQ, as the connection failed.')
if self.__have_not_warned_about_connection_fail_yet:
logwarn(LOGGER, 'Could not publish message(s) to RabbitMQ. The connection failed definitively.')
self.__have_not_warned_about_connection_fail_yet = False
elif self.statemachine.get_detail_closed_by_publisher():
logtrace(LOGGER, 'Cannot publish message to RabbitMQ, as the connection was closed by the user.')
if self.__have_not_warned_about_force_close_yet:
logwarn(LOGGER, 'Could not publish message(s) to RabbitMQ. The sender was closed by the user.')
self.__have_not_warned_about_force_close_yet = False
else:
if self.thread._channel is None:
logerror(LOGGER, 'Very unexpected. Could not publish message(s) to RabbitMQ. There is no channel.')
'''
Retrieves a message from stack and tries to publish it
to RabbitMQ.
In case of failure, it is put back. In case of success,
it is handed on to the confirm module that is responsible
for waiting for RabbitMQ's confirmation.
Note: The publish may cause an error if the Channel was closed.
A closed Channel should be handled in the on_channel_close()
callback, but we catch it here in case the clean up was not quick enough.
'''
def __publish_message_to_channel(self):
# Find a message to publish.
# If no messages left, well, nothing to publish!
try:
message = self.__get_message_from_stack()
except queue.Empty as e:
logtrace(LOGGER, 'Queue empty. No more messages to be published.')
return
# Now try to publish it.
# If anything goes wrong, you need to put it back to
# the stack of unpublished messages!
try:
success = self.__try_publishing_otherwise_put_back_to_stack(message)
if success:
self.__postparations_after_successful_feeding(message)
# Treat various errors that may occur during publishing:
except pika.exceptions.ChannelClosed as e:
logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ because the Channel is closed (%s)', self.__delivery_number+1, repr(e))
except AttributeError as e:
if self.thread._channel is None:
logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ because there is no channel.', self.__delivery_number+1)
else:
logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ (unexpected error %s:%s)', self.__delivery_number+1, e.__class__.__name__, repr(e))
except AssertionError as e:
            logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ because of AssertionError: "%s"', self.__delivery_number+1, e)
if 'A non-string value was supplied for self.exchange' in repr(e):
exch = self.thread.get_exchange_name()
logwarn(LOGGER, 'Exchange was "%s" (type %s)', exch, type(exch))
'''
Retrieve an unpublished message from stack.
Note: May block for up to 2 seconds.
:return: A message from the stack of unpublished messages.
:raises: queue.Empty.
'''
def __get_message_from_stack(self, seconds=0):
message = self.thread.get_message_from_unpublished_stack(seconds)
logtrace(LOGGER, 'Found message to be published. Now left in queue to be published: %i messages.', self.thread.get_num_unpublished())
return message
'''
This tries to publish the message and puts it back into the
Queue if it failed.
:param message: Message to be sent.
:raises: pika.exceptions.ChannelClosed, if the Channel is closed.
'''
def __try_publishing_otherwise_put_back_to_stack(self, message):
try:
# Getting message info:
properties = self.nodemanager.get_properties_for_message_publications()
routing_key, msg_string = rabbitutils.get_routing_key_and_string_message_from_message_if_possible(message)
routing_key = self.nodemanager.adapt_routing_key_for_untrusted(routing_key)
# Logging
logtrace(LOGGER, 'Publishing message %i (key %s) (body %s)...', self.__delivery_number+1, routing_key, msg_string) # +1 because it will be incremented after the publish.
log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Trying actual publish... (trigger no. %i).', self.__logcounter_trigger)
logtrace(LOGGER, '(Publish to channel no. %i).', self.thread._channel.channel_number)
# Actual publish to exchange
self.thread._channel.basic_publish(
exchange=self.thread.get_exchange_name(),
routing_key=routing_key,
body=msg_string,
properties=properties,
mandatory=defaults.RABBIT_MANDATORY_DELIVERY
)
return True
# If anything went wrong, put it back into the stack of
# unpublished messages before re-raising the exception
# for further handling:
except Exception as e:
success = False
logwarn(LOGGER, 'Message was not published. Putting back to queue. Reason: %s: "%s"',e.__class__.__name__, repr(e))
self.thread.put_one_message_into_queue_of_unsent_messages(message)
logtrace(LOGGER, 'Now (after putting back) left in queue to be published: %i messages.', self.thread.get_num_unpublished())
raise e
'''
If a publish was successful, pass it to the confirmer module
and in increment delivery_number for the next message.
'''
def __postparations_after_successful_feeding(self, msg):
# Pass the successfully published message and its delivery_number
# to the confirmer module, to wait for its confirmation.
# Increase the delivery number for the next message.
self.thread.put_to_unconfirmed_delivery_tags(self.__delivery_number)
self.thread.put_to_unconfirmed_messages_dict(self.__delivery_number, msg)
self.__delivery_number += 1
# Logging
self.__logcounter_success += 1
log_every_x_times(LOGGER, self.__logcounter_success, self.__LOGFREQUENCY, 'Actual publish to channel done (trigger no. %i, publish no. %i).', self.__logcounter_trigger, self.__logcounter_success)
logtrace(LOGGER, 'Publishing messages %i to RabbitMQ... done.', self.__delivery_number-1)
if (self.__delivery_number-1 == 1):
loginfo(LOGGER, 'First message published to RabbitMQ.')
logdebug(LOGGER, 'Message published (no. %i)', self.__delivery_number-1)
'''
Reset the delivery_number for the messages.
This must be called on a reconnection / channel reopen!
And may not be called during any other situation!
The number is not sent along to the RabbitMQ server, but
the server keeps track of the delivery number
separately on its side.
That's why it is important to make sure it is incremented
and reset exactly the same way (incremented at each successfully
published message, and reset to one at channel reopen).
(called by the builder during reconnection / channel reopen).
'''
def reset_delivery_number(self):
self.__delivery_number = 1
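# Hedged sketch (not part of the original module): how the owning thread
# drives the feeder, firing one publish trigger per queued message.
def _example_drain(feeder, thread):
    for _ in range(thread.get_num_unpublished()):
        feeder.publish_message()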
| IS-ENES-Data/esgf-pid | esgfpid/rabbit/asynchronous/thread_feeder.py | Python | apache-2.0 | 13,289 |
from typing import ClassVar, FrozenSet
from ..config import Config
from .dependency import IngressClassesDependency, SecretDependency, ServiceDependency
from .k8sobject import KubernetesGVK, KubernetesObject
from .k8sprocessor import ManagedKubernetesProcessor
from .resource import NormalizedResource, ResourceManager
class IngressClassProcessor (ManagedKubernetesProcessor):
CONTROLLER: ClassVar[str] = 'getambassador.io/ingress-controller'
ingress_classes_dep: IngressClassesDependency
def __init__(self, manager: ResourceManager) -> None:
super().__init__(manager)
self.ingress_classes_dep = self.deps.provide(IngressClassesDependency)
def kinds(self) -> FrozenSet[KubernetesGVK]:
return frozenset([
KubernetesGVK('networking.k8s.io/v1beta1', 'IngressClass'),
KubernetesGVK('networking.k8s.io/v1', 'IngressClass'),
])
def _process(self, obj: KubernetesObject) -> None:
# We only want to deal with IngressClasses that belong to "spec.controller: getambassador.io/ingress-controller"
if obj.spec.get('controller', '').lower() != self.CONTROLLER:
self.logger.debug(f'ignoring IngressClass {obj.name} without controller - getambassador.io/ingress-controller')
return
if obj.ambassador_id != Config.ambassador_id:
self.logger.debug(f'IngressClass {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...')
return
# TODO: Do we intend to use this parameter in any way?
# `parameters` is of type TypedLocalObjectReference,
# meaning it links to another k8s resource in the same namespace.
# https://godoc.org/k8s.io/api/core/v1#TypedLocalObjectReference
#
# In this case, the resource referenced by TypedLocalObjectReference
# should not be namespaced, as IngressClass is a non-namespaced resource.
#
# It was designed to reference a CRD for this specific ingress-controller
# implementation... although usage is optional and not prescribed.
ingress_parameters = obj.spec.get('parameters', {})
self.logger.debug(f'Handling IngressClass {obj.name} with parameters {ingress_parameters}...')
self.aconf.incr_count('k8s_ingress_class')
# Don't emit this directly. We use it when we handle ingresses below. If
# we want to use the parameters, we should add them to this dependency
# type.
self.ingress_classes_dep.ingress_classes.add(obj.name)
class IngressProcessor (ManagedKubernetesProcessor):
service_dep: ServiceDependency
ingress_classes_dep: IngressClassesDependency
def __init__(self, manager: ResourceManager) -> None:
super().__init__(manager)
self.deps.want(SecretDependency)
self.service_dep = self.deps.want(ServiceDependency)
self.ingress_classes_dep = self.deps.want(IngressClassesDependency)
def kinds(self) -> FrozenSet[KubernetesGVK]:
return frozenset([
KubernetesGVK('extensions/v1beta1', 'Ingress'),
KubernetesGVK('networking.k8s.io/v1beta1', 'Ingress'),
KubernetesGVK('networking.k8s.io/v1', 'Ingress'),
])
def _update_status(self, obj: KubernetesObject) -> None:
service_status = None
if not self.service_dep.ambassador_service or not self.service_dep.ambassador_service.name:
self.logger.error(f"Unable to set Ingress {obj.name}'s load balancer, could not find Ambassador service")
else:
service_status = self.service_dep.ambassador_service.status
if obj.status != service_status:
if service_status:
status_update = (obj.gvk.kind, obj.namespace, service_status)
self.logger.debug(f"Updating Ingress {obj.name} status to {status_update}")
self.aconf.k8s_status_updates[f'{obj.name}.{obj.namespace}'] = status_update
else:
self.logger.debug(f"Not reconciling Ingress {obj.name}: observed and current statuses are in sync")
def _process(self, obj: KubernetesObject) -> None:
ingress_class_name = obj.spec.get('ingressClassName', '')
has_ingress_class = ingress_class_name in self.ingress_classes_dep.ingress_classes
has_ambassador_ingress_class_annotation = obj.annotations.get('kubernetes.io/ingress.class', '').lower() == 'ambassador'
# check the Ingress resource has either:
# - a `kubernetes.io/ingress.class: "ambassador"` annotation
# - a `spec.ingressClassName` that references an IngressClass with
# `spec.controller: getambassador.io/ingress-controller`
#
# also worth noting, the kube-apiserver might assign the `spec.ingressClassName` if unspecified
# and only 1 IngressClass has the following annotation:
# annotations:
# ingressclass.kubernetes.io/is-default-class: "true"
if not (has_ingress_class or has_ambassador_ingress_class_annotation):
self.logger.debug(f'ignoring Ingress {obj.name} without annotation (kubernetes.io/ingress.class: "ambassador") or IngressClass controller (getambassador.io/ingress-controller)')
return
# We don't want to deal with non-matching Ambassador IDs
if obj.ambassador_id != Config.ambassador_id:
self.logger.debug(f"Ingress {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...")
return
self.logger.debug(f"Handling Ingress {obj.name}...")
self.aconf.incr_count('k8s_ingress')
ingress_tls = obj.spec.get('tls', [])
for tls_count, tls in enumerate(ingress_tls):
tls_secret = tls.get('secretName', None)
if tls_secret is not None:
for host_count, host in enumerate(tls.get('hosts', ['*'])):
tls_unique_identifier = f"{obj.name}-{tls_count}-{host_count}"
spec = {
'ambassador_id': [obj.ambassador_id],
'hostname': host,
'acmeProvider': {
'authority': 'none'
},
'tlsSecret': {
'name': tls_secret
},
'requestPolicy': {
'insecure': {
'action': 'Route'
}
}
}
ingress_host = NormalizedResource.from_data(
'Host',
tls_unique_identifier,
namespace=obj.namespace,
labels=obj.labels,
spec=spec,
)
self.logger.debug(f"Generated Host from ingress {obj.name}: {ingress_host}")
self.manager.emit(ingress_host)
# parse ingress.spec.defaultBackend
# using ingress.spec.backend as a fallback, for older versions of the Ingress resource.
default_backend = obj.spec.get('defaultBackend', obj.spec.get('backend', {}))
db_service_name = default_backend.get('serviceName', None)
db_service_port = default_backend.get('servicePort', None)
if db_service_name is not None and db_service_port is not None:
db_mapping_identifier = f"{obj.name}-default-backend"
default_backend_mapping = NormalizedResource.from_data(
'Mapping',
db_mapping_identifier,
namespace=obj.namespace,
labels=obj.labels,
spec={
'ambassador_id': obj.ambassador_id,
'prefix': '/',
'service': f'{db_service_name}.{obj.namespace}:{db_service_port}'
},
)
self.logger.debug(f"Generated mapping from Ingress {obj.name}: {default_backend_mapping}")
self.manager.emit(default_backend_mapping)
# parse ingress.spec.rules
ingress_rules = obj.spec.get('rules', [])
for rule_count, rule in enumerate(ingress_rules):
rule_http = rule.get('http', {})
rule_host = rule.get('host', None)
http_paths = rule_http.get('paths', [])
for path_count, path in enumerate(http_paths):
path_backend = path.get('backend', {})
path_type = path.get('pathType', 'ImplementationSpecific')
service_name = path_backend.get('serviceName', None)
service_port = path_backend.get('servicePort', None)
path_location = path.get('path', '/')
if not service_name or not service_port or not path_location:
continue
unique_suffix = f"{rule_count}-{path_count}"
mapping_identifier = f"{obj.name}-{unique_suffix}"
# For cases where `pathType: Exact`,
# otherwise `Prefix` and `ImplementationSpecific` are handled as regular Mapping prefixes
                is_exact_prefix = (path_type == 'Exact')
spec = {
'ambassador_id': obj.ambassador_id,
'prefix': path_location,
'prefix_exact': is_exact_prefix,
'precedence': 1 if is_exact_prefix else 0, # Make sure exact paths are evaluated before prefix
'service': f'{service_name}.{obj.namespace}:{service_port}'
}
if rule_host is not None:
if rule_host.startswith('*.'):
# Ingress allow specifying hosts with a single wildcard as the first label in the hostname.
# Transform the rule_host into a host_regex:
# *.star.com becomes ^[a-z0-9]([-a-z0-9]*[a-z0-9])?\.star\.com$
spec['host'] = rule_host\
.replace('.', '\\.')\
.replace('*', '^[a-z0-9]([-a-z0-9]*[a-z0-9])?', 1) + '$'
spec['host_regex'] = True
else:
spec['host'] = rule_host
path_mapping = NormalizedResource.from_data(
'Mapping',
mapping_identifier,
namespace=obj.namespace,
labels=obj.labels,
spec=spec,
)
self.logger.debug(f"Generated mapping from Ingress {obj.name}: {path_mapping}")
self.manager.emit(path_mapping)
# let's make arrangements to update Ingress' status now
self._update_status(obj)
# Let's see if our Ingress resource has Ambassador annotations on it
self.manager.emit_annotated(NormalizedResource.from_kubernetes_object_annotation(obj))
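# Hedged illustration (not part of the original processor): the inline
# wildcard-host transformation performed in _process above, extracted as a
# standalone helper for clarity.
def _example_wildcard_host_to_regex(rule_host: str) -> str:
    # '*.star.com' -> '^[a-z0-9]([-a-z0-9]*[a-z0-9])?\.star\.com$'
    return rule_host.replace('.', '\\.').replace(
        '*', '^[a-z0-9]([-a-z0-9]*[a-z0-9])?', 1) + '$'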
| datawire/ambassador | python/ambassador/fetch/ingress.py | Python | apache-2.0 | 11,005 |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
from gluon.contrib.user_agent_parser import mobilize
import os,sys,types
import string,operator
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import bokeh
#from bokeh.plotting import *
home = os.path.expanduser("~")
datapath = os.path.join(request.folder,'static/results')
from applications.epitopemap.modules.mhcpredict import base, sequtils, tepitope
methods = ['tepitope','netmhciipan','iedbmhc1','bcell']#,'threading'] #'iedbmhc2'
iedbmethods = ['IEDB_recommended','consensus','ann','smm','arb','netmhcpan']
bcellmethods = ['Chou-Fasman', 'Emini', 'Karplus-Schulz',
'Kolaskar-Tongaonkar', 'Parker', 'Bepipred']
colors = {'tepitope':'green','netmhciipan':'orange',
'iedbmhc1':'blue','iedbmhc2':'pink','threading':'purple'}
colormaps={'tepitope':'Greens','netmhciipan':'Oranges','iedbmhc2':'Pinks',
'threading':'Purples','iedbmhc1':'Blues'}
def index():
"""
example action using the internationalization operator T and flash
rendered by views/default/index.html or views/generic.html
"""
if request.user_agent().is_mobile:
response.view.replace('.html','.mobile.html')
form = quicksearch()
return dict(message=T('Menu'),searchform=form)
def register():
return dict(form=auth.register())
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
auth.settings.registration_requires_approval = True
adminmail = '[email protected]'
auth.settings.register_onaccept = lambda form: mail.send(to=adminmail,
subject='New user registered for %s application' % (request.application),
message="new user email is %s" % (form.vars.email))
return dict(form=auth())
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request,db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
@auth.requires_membership("editor_group")
def list_users():
btn = lambda row: A("Edit", _href=URL('manage_user', args=row.auth_user.id))
db.auth_user.edit = Field.Virtual(btn)
rows = db(db.auth_user).select()
headers = ["ID", "Name", "Last Name", "Email","registration", "Edit"]
fields = ['id', 'first_name', 'last_name', "email", "registration_key", "edit"]
table = TABLE(THEAD(TR(*[B(header) for header in headers])),
TBODY(*[TR(*[TD(row[field]) for field in fields]) \
for row in rows]))
table["_class"] = "table table-striped table-bordered table-condensed"
return dict(table=table)
@auth.requires_membership("editor_group")
def manage_user():
user_id = request.args(0) or redirect(URL('list_users'))
form = SQLFORM(db.auth_user, user_id).process()
membership_panel = LOAD(request.controller,
'manage_membership.html',
args=[user_id],
ajax=True)
return dict(form=form,membership_panel=membership_panel)
@auth.requires_membership("editor_group")
def manage_membership():
user_id = request.args(0) or redirect(URL('list_users'))
db.auth_membership.user_id.default = int(user_id)
db.auth_membership.user_id.writable = False
form = SQLFORM.grid(db.auth_membership.user_id == user_id,
args=[user_id],
searchable=False,
deletable=False,
details=False,
selectable=False,
csv=False,
user_signature=True) # change to True in production
return form
@auth.requires_signature()
def data():
return dict(form=crud())
def mpld3Plot(fig, objects=None):
    """mpld3 html plot from figure"""
    import mpld3
    from mpld3 import plugins
    html = mpld3.fig_to_html(fig)
    if objects != None and len(objects) > 0:
        bars, labels = zip(*objects)
        # MyToolTip is assumed to be a project-specific plugin defined elsewhere
        tooltip = MyToolTip(bars, labels)
        plugins.connect(fig, tooltip)
    return html
def mplPlot(fig):
"""Convert matplitlib plot to bokeh"""
from bokeh import mpl
plot = mpl.to_bokeh(fig)
return plot
def embedPlot_old(plot):
"""Embed plot method for older versions of bokeh"""
from bokeh.resources import Resources
from bokeh.embed import autoload_static
fp = os.path.join(request.folder,'static/temp/')
fp = os.path.join(fp, plot._id+".js")
res = Resources("relative")
res.js_files = ["../static/js/bokeh.min.js"]
res.css_files = ["../static/css/bokeh.min.css"]
jspath = os.path.join('../static/temp/', plot._id+".js")
js,tag = autoload_static(plot, res, jspath)
with open(fp, "w") as f:
f.write(js)
    return js, tag
def embedPlot(plot):
"""Embed plot method for new version of bokeh (tested on 0.11)"""
from bokeh.embed import components
script, div = components(plot)
#inject the required bokeh js and css files
response.files.append(URL('static','css/bokeh.min.css'))
response.files.append(URL('static','js/bokeh.min.js'))
response.include_files()
return script, div
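# Hedged usage sketch: a controller action would hand the pair straight to a
# view that renders them, e.g. {{=XML(script)}} and {{=XML(div)}} in web2py.
def embedPlotExample(plot):
    script, div = embedPlot(plot)
    return dict(script=script, div=div)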
def plotRegions(plot, regions=None):
"""Plot regions of interest"""
h=27
y=.5+h/2.0
w=20
colors = {'negative':'#FF3333', 'positive':'#0099FF'}
rv0655 = {'negative':[66,77,171,198,251], 'positive':[231]}
rv3676 = {'negative':[197], 'positive':[42,117,204]}
rv0757 = {'negative':[73,175], 'positive':[125,210]}
rv3584 = {'negative':[72], 'positive':[43,49]}
rv3390 = {'positive':[178,185]}
reg = rv3584
for r in reg:
x = reg[r]
x = [i+w/2 for i in x]
plot.rect(x,y, width=w, height=h,color=colors[r],
line_color='black',alpha=0.4,legend=r)
plot.legend.label_text_font_size = '15pt'
return
def plotAnnotations(plot,annotation):
#print annotation
h=1.8
y=.4+h/2.0
if 'signalp' in annotation:
x = annotation['signalp'].values()
#source = ColumnDataSource(data=dict(x=x,y=y))
plot.rect(x,y, width=.5, height=h,color='purple',line_color='red',alpha=0.7,legend='signalp')
if 'tmhmm' in annotation:
vals = annotation['tmhmm']
x=[i[0]+(i[1]-i[0])/2.0 for i in vals]
w=[i[1]-i[0] for i in vals]
#print x,w,y
plot.rect(x,y, width=w, height=h,color='blue',line_color='blue',alpha=0.6,legend='tmhmm')
if 'pfam27' in annotation:
vals = annotation['pfam27']
#print vals
text = [i[0] for i in vals]
x=[i[1]+(i[2]-i[1])/2.0 for i in vals]
w=[i[2]-i[1] for i in vals]
plot.rect(x,y, width=w, height=h,color='white',line_color='black',alpha=0.6)
plot.text(x,y, text=text, text_font_size='9pt', angle=0, text_alpha=.8,
text_baseline='middle',text_align='center')
return
def plotBCell(plot,pred,height):
"""Line plot of b cell predictions - no allele stuff"""
x = pred.data.Position
#print pred.data[:20]
#source = ColumnDataSource(data=dict(x=x,y=y))
y=pred.data.Score
h=height
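    # shift scores to be non-negative, then rescale into the track height;
    # the +3 offset keeps the line clear of the annotation rows below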
y = y+abs(min(y))
y = y*(h/max(y))+3
plot.line(x, y, line_color="red", line_width=2, alpha=0.6,legend='bcell')
return
def plotTracks(preds,tag,n=3,title=None,width=820,height=None,
seqdepot=None,bcell=None,exp=None):
"""Plot epitopes as parallel tracks"""
from bokeh.models import Range1d,HoverTool,FactorRange,Grid,GridPlot,ColumnDataSource
from bokeh.plotting import Figure
alls=1
if title == None:
title=tag
for m in preds:
alls += len(preds[m].data.groupby('allele'))
if height==None:
height = 130+10*alls
yrange = Range1d(start=0, end=alls+3)
plot = Figure(title=title,title_text_font_size="11pt",plot_width=width,
plot_height=height, y_range=yrange,
y_axis_label='allele',
tools="xpan, xwheel_zoom, resize, hover, reset, save",
background_fill="#FAFAFA",
toolbar_location="below")
h=3
if bcell != None:
plotBCell(plot, bcell, alls)
if seqdepot != None:
plotAnnotations(plot,seqdepot)
if exp is not None:
plotExp(plot, exp)
#plotRegions(plot)
#lists for hover data
#we plot all rects at once
x=[];y=[];allele=[];widths=[];clrs=[];peptide=[]
predictor=[];position=[];score=[];leg=[]
l=80
for m in preds:
pred = preds[m]
cmap = mpl.cm.get_cmap(colormaps[m])
df = pred.data
sckey = pred.scorekey
pb = pred.getPromiscuousBinders(data=df,n=n)
if len(pb) == 0:
continue
l = pred.getLength()
grps = df.groupby('allele')
alleles = grps.groups.keys()
c=colors[m]
leg.append(m)
for a,g in grps:
b = pred.getBinders(data=g)
b = b[b.pos.isin(pb.pos)] #only promiscuous
b.sort('pos',inplace=True)
scores = b[sckey].values
score.extend(scores)
pos = b['pos'].values
position.extend(pos)
x.extend(pos+(l/2.0)) #offset as coords are rect centers
widths.extend([l for i in scores])
clrs.extend([c for i in scores])
y.extend([h+0.5 for i in scores])
alls = [a for i in scores]
allele.extend(alls)
peptide.extend(list(b.peptide.values))
predictor.extend([m for i in scores])
h+=1
source = ColumnDataSource(data=dict(x=x,y=y,allele=allele,peptide=peptide,
predictor=predictor,position=position,score=score))
plot.rect(x,y, width=widths, height=0.8,
#x_range=Range1d(start=1, end=seqlen+l),
color=clrs,line_color='gray',alpha=0.7,source=source)
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("allele", "@allele"),
("position", "@position"),
("peptide", "@peptide"),
("score", "@score"),
("predictor", "@predictor"),
])
seqlen = pred.data.pos.max()+l
plot.set(x_range=Range1d(start=0, end=seqlen+1))
plot.xaxis.major_label_text_font_size = "8pt"
plot.xaxis.major_label_text_font_style = "bold"
plot.ygrid.grid_line_color = None
plot.yaxis.major_label_text_font_size = '0pt'
plot.xaxis.major_label_orientation = np.pi/4
#js,html = embedPlot(plot)
script, div = embedPlot(plot)
return script, div
#return plot, html
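# note: plotTracks assumes `preds` is a dict of {method name: predictor},
# where each predictor exposes a .data DataFrame with 'allele', 'pos' and
# 'peptide' columns plus the predictor's score column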
def plotEmpty(width=850):
    """Plot an empty plot"""
    from bokeh.models import Range1d
    from bokeh.plotting import Figure
    plot = Figure(title='', plot_width=width, plot_height=10,
                  y_range=Range1d(start=1, end=100),
                  tools="xpan, xwheel_zoom, resize, hover, reset",
                  background_fill="white")
    x = range(100); y = [2]*len(x)
    plot.rect(x, y, width=1, height=0.8, color='white')
    script, div = embedPlot(plot)
    return script + div
def plots():
"""Use as component to plot predictions for given request"""
label = request.vars.label
#if we have no data
if label == 'dummy':
figure = plotEmpty()
return dict(figure=figure)
g = request.vars.genome
tag = request.vars.tag
gene = request.vars.gene
title=None
if gene != None:
t = getTagbyGene(g,gene) #override tag with gene name if provided
if t != None:
tag = t
title = tag+' / '+gene
if request.vars.width == None:
width = 820
else:
width = int(request.vars.width)
if request.vars.height != None:
height = int(request.vars.height)
else:
height = None
if request.vars.n == None:
n=3
else:
n = int(request.vars.n)
if request.vars.perccutoff != None:
perccutoff=float(request.vars.perccutoff)
else:
perccutoff=0.96
preds,bcell,cutoffs = getPredictions(label,g,tag,perccutoff)
    if preds is None or len(preds)==0:
return dict(error=True)
sd=None
if request.vars.annotation == 'on':
feat, fastafmt, previous, next = getFeature(g,tag)
seq = feat['translation']
sd = getSeqDepot(seq)['t']
script, div = plotTracks(preds,tag,n=n,title=title,
width=width,height=height,seqdepot=sd,bcell=bcell)
return dict(script=script,div=div,preds=preds,error=False)
def scoredistplots(preds):
"""Score distribution plots"""
from bokeh.models import Range1d,GridPlot
from bokeh.plotting import Figure
plots=[]
for p in preds:
pred=preds[p]
key=pred.scorekey
data = pred.data[key]
hist, edges = np.histogram(data, density=True, bins=30)
p = Figure(title=p,plot_height=250,tools='')
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
fill_color="#036564", line_color="#033649")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
plots.append(p)
    plot = GridPlot(children=[plots])
    script, div = embedPlot(plot)
    return script + div
def scoreCorrelations(preds):
figs=[]
for p in preds:
pred=preds[p]
df=pred.data
x = df.pivot_table(index='peptide', columns='allele', values=pred.scorekey)
f=plt.figure()
ax=f.add_subplot(111)
pd.scatter_matrix(x, alpha=0.2, figsize=(12,12), diagonal='hist',ax=ax)
#plt.tight_layout()
figs.append(f)
return figs
def results():
"""Component to show predictions for all peptides for each predictor """
label = request.vars.label
g = request.vars.genome
tag = request.vars.tag
preds,bcell,cutoffs = getPredictions(label,g,tag)
summary = summaryhtml(preds)
data = {}
for p in preds:
data[p] = preds[p].reshape()
data = dict(data)
return dict(data=data)
def binders():
"""Component for top binder tables"""
label = request.vars.label
g = request.vars.genome
tag = request.vars.tag
n = int(request.vars.n)
preds,bcell,cutoffs = getPredictions(label,g,tag)
summary = summaryhtml(preds)
b = base.getBinders(preds,n=n)
kys = b.keys()
if 'tepitope' in kys and 'netmhciipan' in kys:
shared = pd.merge(b['tepitope'],b['netmhciipan'],
on=['peptide','name','pos','core'],
copy=False).sort('pos')
else:
shared=''
return dict(b=b,summary=summary,shared=shared,n=n)
def showSequence(seq,preds):
"""Get html display of binders on sequences"""
colors = {'tepitope':'#70E2AA','netmhciipan':'orange',
'iedbmhc1':'#9DCEFF','iedbmhc2':'pink','threading':'#BCA9F5'}
l=9 #need to get this from predictors
seqs=[]
tabledata=[]
#idx = ''.join([seq[i] if i%10!=0 else '|' for i in range(len(seq))])
tabledata.append((TR(TH('allele'),TH('sequence'))))
for p in preds:
b = preds[p].getBinders()
clr = colors[p]
        #pb = preds[p].getPromiscuousBinders(n=n)
#b = b[b.pos.isin(pb.pos)]
grps = b.groupby('allele')
for a,g in grps:
pos=[]
for i in g.pos: pos.extend(np.arange(i,i+l))
seqhtml=[]
for i in range(len(seq)):
if i in pos:
seqhtml.append(SPAN(seq[i],_style="background-color:%s" %clr))
else:
seqhtml.append(SPAN(seq[i],_style="color: gray"))
tabledata.append((TR(TH(a),TD(*seqhtml))))
table = TABLE(*tabledata,_class="seqtable")
return table
def sequence():
"""Component to highlight epitopes on sequence"""
label = request.vars.label
g = request.vars.genome
tag = request.vars.tag
n = int(request.vars.n)
feat, fastafmt, previous, next = getFeature(g,tag)
if feat==None:
return dict(table=None)
seq = feat.qualifiers['translation'][0]
preds,bcell,c = getPredictions(label,g,tag)
table = showSequence(seq,preds)
return dict(table=table)
def feature():
"""Component showing gene annotation"""
g = request.vars.genome
tag = request.vars.tag
items = getFeature(g,tag)
if items != None:
feat, fastafmt, previous, next = items
return dict(fastafmt=fastafmt,feat=feat,
previous=previous,next=next)
return dict()
def iedb():
"""remote iedb tools predcitions"""
g = request.vars.genome
tag = request.vars.tag
feature, fastafmt, previous, next = getFeature(g,tag)
seq = feature.qualifiers['translation'][0]
df = base.getIEDBRequest(seq)
result = XML(df.to_html(classes='mytable'))
return dict(result=result)
def seqdepot(result):
    """SeqDepot data table format"""
    # note: relies on `sd` (a SeqDepot client), `tag`, `aseqid` and `seq`
    # being defined elsewhere in this controller's scope
kys = result['t'].keys()
tables = {}
for k in kys:
fieldnames = [TH(i) for i in sd.toolFields(k)]
rows = [TR(i) for i in result['t'][k]]
rows.insert(0,TR(*fieldnames))
tables[k] = TABLE(*rows,_class="tinytable")
fp = os.path.join(request.folder,'static/temp/')
filename = os.path.join(fp,tag+'.png')
sd.saveImage(aseqid, filename, {'format':'png'})
imgurl = IMG(_src=URL(r=request,c='static',f='temp/%s' %os.path.basename(filename)))
links = [LI(A(k,_href="#%s" %k)) for k in tables]
tablinks = UL(*links,_class="small-tab-links")
divs=[DIV(tables[k],_id=k,_class="tab") for k in tables]
content = DIV(*divs,_class="tab-content")
tabbedcontent = DIV(tablinks, content,_class="tabs")
return dict(result=result,seq=seq,imgurl=imgurl,tables=tables,
tabbedcontent=tabbedcontent)
def protein():
"""Display protein info from a fixed URL"""
label = request.args[0]
g = request.args[1]
tag = request.args[2]
n = 3
#print g
if g == 'other':
items = (None,None,'','')
else:
items = getFeature(g,tag)
if items != None:
feature, fastafmt, previous, next = items
else:
raise HTTP(404, "No such feature %s available in genome %s" %(tag,g))
result = dict(label=label,tag=tag,genome=g,n=n,
previous=previous,next=next)
return result
@auth.requires_login()
def sequences():
"""Allow user to add fasta sequences instead"""
uploadform = FORM(
TABLE(TR(TD(LABEL('Identifier:',_for='name')),
TD(INPUT(_name='name',_type='string',_required=True))),
TR(TD(LABEL('Fasta file:')),TD(INPUT(_name='fastafile',_type='file'))),
TR(TD(LABEL('Description:',_for='description')),
TD(INPUT(_name='description',_type='string',_required=False,
_style="width:400px;"))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
_class="smalltable"), _id="myform")
if uploadform.accepts(request.vars,formname='upload_form'):
fname = request.vars.fastafile.filename
uploadform.vars.filename = fname
id = db.sequences.insert(name=uploadform.vars.name,
description=uploadform.vars.description,
file=uploadform.vars.fastafile,
filename=uploadform.vars.filename)
db.sequences.id.readable=False
query=((db.sequences.id>0))
default_sort_order=[db.sequences.id]
links=[lambda row: A('browse',_href=URL('fastaview', args=row.name))]
grid = SQLFORM.grid(query=query, orderby=default_sort_order,
create=False, deletable=True, maxtextlength=64, paginate=35,
details=True, csv=False, ondelete=myondelete,
editable=auth.has_membership('editor_group'),links=links)
return dict(grid=grid,form=uploadform)
@auth.requires_login()
def genomes():
"""Display available genomes and allow upload"""
formats = ['genbank']
uploadform = FORM(
TABLE(TR(TD(LABEL('Identifier:',_for='name')),
TD(INPUT(_name='name',_type='string',_required=True))),
TR(TD(LABEL('Format:',_for='format')),
TD(SELECT(formats,_name='format',_type='string',_required=True))),
TR(TD(LABEL('file to upload')),TD(INPUT(_name='gfile',_type='file'))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
_class="smalltable"), _id="myform")
if uploadform.accepts(request.vars,formname='upload_form'):
fname = request.vars.gfile.filename
uploadform.vars.filename = fname
id = db.genomes.insert(name=uploadform.vars.name,
file=uploadform.vars.gfile,
filename=uploadform.vars.filename,
format=uploadform.vars.format)
db.genomes.id.readable=False
query=((db.genomes.id>0))
default_sort_order=[db.genomes.id]
links=[lambda row: A('browse',_href=URL('genomeview', args=row.name))]
grid = SQLFORM.grid(query=query, orderby=default_sort_order,
create=False, deletable=True, maxtextlength=350, paginate=35,
details=True, csv=False, ondelete=myondelete,
editable=auth.has_membership('editor_group'),links=links)
return dict(grid=grid,form=uploadform)
def genomeview():
"""Summary page for genome"""
g = request.args[0]
if len(request.args) == 1:
gfile = getGenome(g)
data = sequtils.genbank2Dataframe(gfile)
summary = sequtils.genbankSummary(data)
data = data[data.type=='CDS']
data = data.drop(['type','pseudo'],1)
#data=data.set_index('locus_tag')
return dict(genome=g,data=data,summary=summary)
else:
return dict()
def fastaview():
"""Summary of fasta contents"""
f = request.args[0]
if len(request.args) == 1:
ffile,desc = getFasta(f)
pd.set_option('max_colwidth', 800)
data = sequtils.fasta2Dataframe(ffile)
return dict(fastafile=f,data=data,desc=desc)
else:
return dict()
@auth.requires_login()
def presets():
"""Preset alleles form"""
uploadform = FORM(
TABLE(TR(TD(LABEL('Name:',_for='name')),
TD(INPUT(_name='name',_type='string',_required=True))),
TR(TD(LABEL('CSV file:')),TD(INPUT(_name='csvfile',_type='file'))),
TR(TD(LABEL('Description:',_for='description')),
TD(INPUT(_name='description',_type='string',_required=False,
_style="width:400px;"))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
_class="smalltable"), _id="myform")
if uploadform.accepts(request.vars,formname='upload_form'):
fname = request.vars.csvfile.filename
uploadform.vars.filename = fname
id = db.allelepresets.insert(name=uploadform.vars.name,
description=uploadform.vars.description,
file=uploadform.vars.csvfile,
filename=uploadform.vars.filename)
db.allelepresets.id.readable=False
query=((db.allelepresets.id>0))
default_sort_order=[db.allelepresets.id]
#links=[lambda row: A('browse',_href=URL('fastaview', args=row.name))]
grid = SQLFORM.grid(query=query, orderby=default_sort_order,
create=False, deletable=True, maxtextlength=64, paginate=35,
details=True, csv=False, ondelete=myondelete,
editable=auth.has_membership('editor_group'))#,links=links)
return dict(grid=grid,form=uploadform)
@auth.requires_login()
def predictions():
"""Parse results folder to show the actual data existing on file system
might not sync with the results ids."""
vals=[]
for root, subdirs, files in os.walk(datapath):
if not subdirs:
p1,method = os.path.split(root)
p2,genome = os.path.split(p1)
predid = os.path.basename(p2)
#print method,genome,predid
vals.append((predid, genome, method, len(files)))
df = pd.DataFrame(vals,columns=['identifier','genome','method','sequences'])
#df = df.set_index('pred. id')
db.predictions.id.readable=False
query=((db.predictions.id>0))
default_sort_order=[db.predictions.id]
grid = SQLFORM.grid(query=query, orderby=default_sort_order,
create=True, maxtextlength=350, #deletable=True,
paginate=20,details=True, csv=False, #ondelete=myondelete,
deletable=auth.has_membership('editor_group'),
editable=auth.has_membership('editor_group'))
return dict(results=df, grid=grid)
def myondelete(table, id):
form = FORM.confirm('Are you sure?')
if form.accepted:
response.flash = "I don't like your submission"
#db(db.predictions.id==id).delete()
return form
def summaryhtml(predictors):
"""Summary table of predictions"""
rows=[]
rows.append(TR(TH('name'),TH('cutoff'),TH('binders')))
for p in predictors:
pred=predictors[p]
b = pred.getPromiscuousBinders(n=2)
rows.append(TR(pred.name, pred.cutoff, len(b)))
return TABLE(*rows,_class='tinytable')
def download():
    """Serve the current predictions as csv. Note: this definition shadows
    the generic web2py download() declared at the top of this controller."""
    import StringIO
label = request.args[0]
g = request.args[1]
t = request.args[2]
preds,bcell,c = getPredictions(label,g,t)
data = [preds[p].data for p in preds]
df = pd.concat(data)
output = StringIO.StringIO()
df.to_csv(output,float_format='%.2f')
csvdata = output.getvalue()
return dict(csvdata=csvdata)
def clusterResults():
"""Cluster results"""
results = {}
files = ['topclusters_MTB-H37Rv.csv','topsinglebinders.csv']
    for f in files:
        path = os.path.join(datapath, f)
        r = pd.read_csv(path, index_col=0)
        r.reset_index(inplace=True,drop=True)
        r.sort('name',inplace=True)
        results[f] = r
return dict(results=results)
def quicksearch():
"""Non DB search just using paths"""
form = SQLFORM.factory(
Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
multiple=False),default=1,label='id'),
Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
multiple=False),default=1,label='genome'),
Field('tag', 'string', label='locus tag',default='',length=10),
Field('gene', 'string', label='gene',default='',length=10),
hidden=dict(width=550,height=250,n=2),
formstyle="table3cols",_id='myform')
form.element('input[name=tag]')['_style'] = 'width:210px;'
form.element('input[name=gene]')['_style'] = 'width:210px;'
return form
def selectionForm():
"""Quick view form"""
form = SQLFORM.factory(
Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
multiple=False),default=1,label='id'),
Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
multiple=False),default=1,label='genome'),
Field('tag', 'string', label='locus tag',default=''),
Field('gene', 'string', label='gene',default=''),
Field('n', 'string', label='min alleles',default=3),
Field('globalcutoff', 'boolean', label='global cutoff',default=True),
Field('perccutoff', 'string', label='perc. cutoff',default=.96),
Field('annotation', 'boolean', label='annotation',default=False),
submit_button="Update",
formstyle='table3cols',_id='myform',_class='myform')
form.element('select[name=genome]').insert(0,'other') #always add other
form.element('input[name=n]')['_style'] = 'width:50px;'
form.element('input[name=perccutoff]')['_style'] = 'width:50px;'
#form.element('input[name=scorecutoff]')['_style'] = 'width:50px;'
form.element('input[name=tag]')['_style'] = 'width:130px;'
form.element('input[name=gene]')['_style'] = 'width:130px;'
return form
@auth.requires_login()
def quickview():
"""Quickview"""
defaultid = 'results_bovine'
form = selectionForm()
searchform = findForm()
return dict(label=defaultid,form=form,searchform=searchform)
def show():
"""Quickview all results in one - faster"""
label = request.vars.label
g = request.vars.genome
tag = request.vars.tag
n = int(request.vars.n)
gene = request.vars.gene
title=None
if gene != '':
t = getTagbyGene(g,gene)
if t != None:
tag = t
title = tag+' / '+gene
if request.vars.perccutoff == None:
cutoff = 0.96
else:
cutoff = float(request.vars.perccutoff)
if request.vars.width == None:
width = 820
else:
width = int(request.vars.width)
annot = request.vars.annotation
if label == 'dummy':
figure = plotEmpty()
preds,bcell,cutoffs = getPredictions(label,g,tag,cutoff)
if len(preds) == 0:
redirect(URL('error'))
if g == 'other':
#no genome stuff
feat = None; fastafmt=''; previous=''; next=''
seq = '' #get the fasta seq
sd=None
else:
feat = None; fastafmt=None
feat, fastafmt, previous, next = getFeature(g,tag)
seq = feat['translation']
sd=None
if request.vars.annotation == 'on':
sd = getSeqDepot(seq)['t']
script, div = plotTracks(preds,tag,n=n,title=title,width=width,seqdepot=sd,bcell=bcell)
#distplots = scoredistplots(preds)
summary = summaryhtml(preds)
#get all results into tables
data = {}
for p in preds:
data[p] = preds[p].reshape()
data = dict(data)
#top binders
b = base.getBinders(preds,n=n)
kys = b.keys()
if 'tepitope' in kys and 'netmhciipan' in kys:
shared = pd.merge(b['tepitope'],b['netmhciipan'],
on=['peptide','name','pos','core'],
copy=False).sort('pos')
else:
shared=''
seqtable = showSequence(seq,preds)
#info
path = os.path.join(datapath, label)
found = [(m,preds[m].getLength()) for m in preds]
info = TABLE(*found,_class='tinytable')
return dict(script=script,div=div,feat=feat,fastafmt=fastafmt,data=data,
b=b,summary=summary,shared=shared,n=n,seqtable=seqtable,cutoffs=cutoffs,
genome=g,tag=tag,label=label,info=info,path=path)
def error():
return dict()
def formerror():
msg = request.vars.msg
return dict(msg=msg)
@auth.requires_login()
def genomeanalysis():
"""Genome wide analysis of epitope predictions"""
defaultid = 'results_test'
predids = [p.identifier for p in db().select(db.predictions.ALL)]
opts1 = [OPTION(i,value=i) for i in predids]
genomes = [p.name for p in db().select(db.genomes.ALL)]
opts2 = [OPTION(i,value=i) for i in genomes]
form = FORM(TABLE(
TR(TD(LABEL('id:',_for='genome')),
TD(SELECT(*opts1,_name='label',
value=defaultid, _style="width:150px;"))),
TR(TD(LABEL('genome:',_for='genome')),
TD(SELECT(*opts2,_name='genome',value='',_style="width:150px;"))),
TR(TD(LABEL('method:',_for='method')),
TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
TR(TD(LABEL('min alleles:',_for='n')),
TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
TR(TD(LABEL('perc cutoff:',_for='perccutoff')),
TD(INPUT(_name='perccutoff',_type='text',value='0.96',_style="width:50px;"))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Analyse'))),
_class="smalltable"), _id="myform")
return dict(form=form)
@auth.requires_login()
def analysegenome():
"""Analyse genome predictions"""
pd.set_option('max_colwidth', 800)
gname = request.vars.genome
label = request.vars.label
method = request.vars.method
if request.vars.n != None:
n = int(request.vars.n)
else:
n = 3
if request.vars.perccutoff != None:
cutoff = float(request.vars.perccutoff)
else:
cutoff = 0.96
b,res,top,cl,fig = genomeAnalysis(label, gname, method, n, cutoff)
#plothtml = mpld3Plot(fig)
plothtml=''
link = A('download binder list',_href=URL('default','analysegenome.csv',extension='',vars=request.vars))
summary = 'Found %s binders in >=%s alleles from %s proteins' %(len(b),n,len(res))
return dict(genome=gname,method=method,cutoff=cutoff,res=res,top=top,cl=cl,
summary=summary, link=link, plothtml=plothtml)
def zip_dataframes(data, filename):
"""Zip dataframes as csv"""
'''import cStringIO, zipfile
stream = cStringIO.StringIO()
zip_file = zipfile.ZipFile(stream, "w", zipfile.ZIP_DEFLATED, False)
for df in data:
zip_file.writestr(filename, df.to_csv(None, encoding='utf-8', index=False))'''
return
def compare():
"""Correlate predictions from 2 methods"""
form = SQLFORM.factory(
Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
multiple=False),default=1,label='id'),
Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
multiple=False),default=1,label='genome'),
Field('method1',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 1'),
Field('method2',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 2'),
Field('n', 'string', label='min alleles',default=3),
hidden=dict(perccutoff=.98),
formstyle="table3cols",_id='myform',_class='myform')
form.element('input[name=n]')['_style'] = 'width:50px;'
return dict(form=form)
def correlationanalysis():
fig=''
msg=None
if request.vars.method1 == request.vars.method2:
        return dict(fig=fig,res=None,msg='2 methods are the same!')
    res = correlation(**request.vars)
    if res is None:
        return dict(fig=fig,res=None,msg='no such predictions')
    fig = plotCorrelation(res)
    return dict(fig=fig,res=res,msg=msg)
def plotCorrelation(res):
from bokeh.models import HoverTool,ColumnDataSource
from bokeh.plotting import Figure
width=600
height=600
plot = Figure(title='',title_text_font_size="11pt",
plot_width=width, plot_height=height,
x_axis_label='method1',y_axis_label='method2',
tools="pan, wheel_zoom, resize, hover, reset, save",
background_fill="#FAFAFA")
x=res['perc_x']
y=res['perc_y']
source = ColumnDataSource(data=dict(x=x,y=y, protein=res.locus_tag))
plot.circle(x,y, color='blue', line_color='gray',fill_alpha=0.5, size=10, source=source)
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("binders1", "@x"),
("binders2", "@y"),
("protein", "@protein"),
])
    script, div = embedPlot(plot)
    return script + div
def conservationAnalysisForm(defaultid='test'):
defaultg = 'MTB-H37Rv'
predids = [p.identifier for p in db().select(db.predictions.ALL)]
opts1 = [OPTION(i,value=i) for i in predids]
genomes = [p.name for p in db().select(db.genomes.ALL)]
genomes.insert(0,'other')
opts2 = [OPTION(i,value=i) for i in genomes]
form = FORM(TABLE(
TR(TD(LABEL('id:',_for='genome')),
TD(SELECT(*opts1,_name='label',
value=defaultid, _style="width:150px;"))),
TR(TD(LABEL('genome:',_for='genome')),
TD(SELECT(*opts2,_name='genome',value=defaultg,_style="width:150px;"))),
TR(TD(LABEL('locus tag:',_for='tag')),
TD(INPUT(_name='tag',_type='text',value="Rv0001",_style="width:150px;"))),
TR(TD(LABEL('method:',_for='method')),
TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
TR(TD(LABEL('min alleles:',_for='n')),
TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
TR(TD(LABEL('min identity:',_for='identity')),
TD(INPUT(_name='identity',value=70,_style="width:50px;"))),
TR(TD(),TD('BLAST options')),
TR(TD(LABEL('entrez query:',_for='entrezquery')),
TD(TEXTAREA(_name='entrezquery',value='',_style="height:100px;width:150px;"))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
_class="smalltable"), _id="myform", hidden=dict(width=850))
return form
@auth.requires_login()
def conservation():
"""Analysis of epitope conservation"""
form = conservationAnalysisForm()
'''if form.process().accepted:
session.flash = 'form accepted'
pvars = {'seq':seq,'hitlist_size':400,'equery':equery}
task = scheduler.queue_task('doTask', #pvars=request.vars,
immediate=True, timeout=300)
print task.id
status = scheduler.task_status(task.id, output=True)
result = status.result
print status'''
return dict(form=form)
@auth.requires_login()
def conservationanalysis():
"""Analysis of epitope conservation"""
pd.set_option('max_colwidth', 3000)
label = request.vars.label
gname = request.vars.genome
method = request.vars.method
n=int(request.vars.n)
tag = request.vars.tag
identity = int(request.vars.identity)
equery = request.vars.entrezquery
retval = conservationAnalysis(**request.vars)
msg=''
if retval == 1:
msg = 'No predictions found for %s with method %s with n=%s.' %(tag,method,n)
return dict(res=None,msg=msg)
elif retval == 2:
msg = 'No BLAST results at >%s%% sequence identity.' %identity
return dict(res=None,msg=msg)
else:
res, alnrows, summary, fig = retval
alnrows = analysis.getAlignedBlastResults(alnrows)
alnrows = analysis.setBlastLink(alnrows)
plothtml = mpld3Plot(fig)
url = A('direct link to these results', _href=URL('default','conservationanalysis.load',
vars={'label':label,'genome':gname,'tag':tag,'method':method,'n':n,
'identity':identity,'equery':equery},extension=''))
return dict(res=res,alnrows=alnrows,summary=summary,plothtml=plothtml,
msg=msg,permlink=url)
def submissionForm():
"""Form for job submission"""
applySettings() #so that paths to predictors work
predids = [p.identifier for p in db().select(db.predictions.ALL)]
opts1 = [OPTION(i,value=i) for i in predids]
genomes = [p.name for p in db().select(db.genomes.ALL)]
genomes.insert(0,'')
opts2 = [OPTION(i,value=i) for i in genomes]
seqs = [p.name for p in db().select(db.sequences.ALL)]
seqs.insert(0,'')
opts3 = [OPTION(i,value=i) for i in seqs]
p1 = base.getPredictor('iedbmhc1')
mhc1alleles = p1.getMHCIList()
p2 = base.getPredictor('netmhciipan')
mhc2alleles = p2.getAlleleList()
drballeles = base.getDRBList(mhc2alleles)
dqpalleles = base.getDQPList(mhc2alleles)
tepitopealleles = tepitope.getAlleles()
#get all possible alleles for both MHCII methods
drballeles = sorted(list(set(drballeles+tepitopealleles)))
lengths = [9,11,13,15]
#presets = presetalleles.keys()
presets = [p.name for p in db().select(db.allelepresets.ALL)]
presets.insert(0,'')
user = session.auth.user['first_name']
form = FORM(DIV(
TABLE(
TR(TD(LABEL('current labels:',_for='genome')),
TD(SELECT(*opts1,_name='label',
value='', _style="width:200px;"))),
TR(TD(LABEL('OR new label:',_for='genome')),
TD(INPUT(_name='newlabel',_type='text',value="",_style="width:200px;"))),
TR(TD(LABEL('genome:',_for='genome')),
TD(SELECT(*opts2,_name='genome',value='',_style="width:200px;"))),
TR(TD(LABEL('locus tags:',_for='names')),
TD(INPUT(_name='names',_type='text',value="",_style="width:200px;"))),
TR(TD(LABEL('fasta seqs:',_for='fasta')),
TD(SELECT(*opts3,_name='fasta',value='',_style="width:200px;"))),
TR(TD(LABEL('methods:',_for='methods')),
TD(SELECT(*methods,_name='methods',value='tepitope',_size=4,_style="width:200px;",
_multiple=True))),
TR(TD(LABEL('mhc1 method:',_for='iedbmethod')),
TD(SELECT(*iedbmethods,_name='iedbmethod',value='IEDB_recommended',_size=1,
_style="width:200px;"))),
TR(TD(LABEL('bcell method:',_for='bcellmethod')),
TD(SELECT(*bcellmethods,_name='bcellmethod',value='Bepipred',_size=1,
_style="width:200px;"))),
TR(TD(LABEL('length:',_for='length')),
TD(SELECT(*lengths,_name='length',value=11,_size=1,_style="width:70px;"))),
TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit Job'))),
_class="smalltable"),_style='float: left'),
DIV(TABLE(
TR(TD(LABEL('MHC-I alleles:',_for='alleles')),
TD(SELECT(*mhc1alleles,_name='mhc1alleles',value='HLA-A*01:01-10',_size=6,_style="width:200px;",
_multiple=True))),
TR(TD(LABEL('MHC-II DRB:',_for='alleles')),
TD(SELECT(*drballeles,_name='drballeles',value='HLA-DRB1*0101',_size=8,_style="width:200px;",
_multiple=True))),
TR(TD(LABEL('MHC-II DQ/P:',_for='alleles')),
TD(SELECT(*dqpalleles,_name='dqpalleles',value='',_size=6,_style="width:200px;",
_multiple=True))),
TR(TD(LABEL('OR Use Preset:',_for='preset')),
TD(SELECT(*presets,_name='preset',value="",_style="width:200px;"))),
_class="smalltable"),_style='float: left'),
_id="myform", hidden=dict(user=user))
return form
@auth.requires_login()
def submit():
"""Process job for submission and queue job"""
form = submissionForm()
if form.process().accepted:
if form.vars.genome == '' and form.vars.fasta == '':
msg = 'provide a genome OR a sequence'
redirect(URL('formerror',vars={'msg':msg}))
session.flash = 'form accepted'
task = scheduler.queue_task('runPredictors', pvars=request.vars,
immediate=True, timeout=259200)
redirect(URL('jobsubmitted', vars={'id':task.id}))
elif form.errors:
response.flash = 'form has errors'
return dict(form=form)
@auth.requires_login()
def jobsubmitted():
"""Get details of a submitted job"""
taskid = int(request.vars['id'])
status = scheduler.task_status(taskid, output=True)
return dict(taskid=taskid,status=status)
def findForm():
"""Find form"""
result={}
form = SQLFORM.factory(
Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
multiple=False),default=1,label='genome'),
Field('gene', 'string', label='gene',default=''),
Field('description', 'string', label='description',default=''),
submit_button="Search",
_id='findform',_class='myform')
form.element('input[name=gene]')['_style'] = 'height:30px;'
form.element('input[name=description]')['_style'] = 'height:30px;'
return form
def search():
"""Search page"""
form = findForm()
return dict(form=form)
def find():
"""Show search results"""
msg = T(" ")
results=pd.DataFrame()
pd.set_option('display.max_colwidth', -1)
gene = request.vars.gene
desc = request.vars.description
genome = request.vars.genome
results = doSearch(genome, gene, desc)
msg = 'found %s proteins' %len(results)
#lst = list(results.index)
link = A('download results',_href=URL('default','find.csv',extension='',vars=request.vars))
return dict(msg=msg,link=link,results=results)
def iedbForm():
dbs = ['iedb','hpv','imma2','hiv_frahm','tcga','tantigen']
types = ['mhc','tcell']
form = SQLFORM.factory(
Field('database', requires=IS_IN_SET(dbs,multiple=False,zero=None),label='database'),
Field('type', requires=IS_IN_SET(types,multiple=False,zero=None),label='type'),
Field('mhc_class', requires=IS_IN_SET([1,2],multiple=False,zero=None), label='mhc_class',default=2),
Field('epitope', 'string', label='epitope'),
submit_button="Search",
_id='iedbform',_class='iedbform')
return form
def datasourcesearch():
"""search IEDB page"""
form = iedbForm()
return dict(form=form)
def datasource():
"""Use pepdata to fetch and search IEDB epitopes"""
print request.vars
db = request.vars.database
epitope = request.vars.epitope
from pepdata import iedb, hpv, imma2, hiv_frahm, tcga, tantigen
if db == 'iedb':
df = iedb.mhc.load_dataframe(mhc_class=2,human=False)
df.columns = df.columns.get_level_values(1)
df = df[df.columns[5:18]]
#df = iedb.tcell.load_dataframe()
#if epitope != '':
# df = df[df['Description'].str.contains(epitope)]
#print df
elif db == 'hpv':
df = hpv.load_mhc()
#df = hpv.load_tcell()
    elif db == 'imma2':
df, non = imma2.load_classes()
elif db == 'hiv_frahm':
df = hiv_frahm.load_dataframe()
elif db == 'tcga':
df = tcga.load_dataframe(cancer_type='paad')
df = df[:50]
elif db == 'tantigen':
df = tantigen.load_mhc()
#df = tantigen.load_tcell()
if len(df) > 5000:
df = df[:5000]
return dict(results=df)
@auth.requires_login()
def test():
l='human' #'results_emida'
g='MTB-H37Rv'
tag='Rv3874'
feat, fastafmt, previous, next = getFeature(g,tag)
seq = feat['translation']
preds,bcell,c = getPredictions(l,g,tag)
exp = pd.read_csv(os.path.join(home, 'epitopedata/cfp10_regions.csv'))
exp = exp[exp.mean_sfc>0.0]
    script, div = plotTracks(preds,tag,n=3,title='test',exp=exp)
    return dict(script=script,div=div,exp=exp)
def plotExp(plot, data):
x = data.pos
y = data.mean_sfc
w = 15
h=40
x = [i+w/2.0 for i in x]
y = y+abs(min(y))
y = y*(h/max(y))+3
#plot.line(x, y, line_color="red", line_width=3, alpha=0.6,legend='exp')
plot.rect(x=x, y=1, width=w, height=y, color="blue", alpha=0.3)
return
def bokehtest():
"""Bokeh test"""
from bokeh.models import Range1d, HoverTool, GridPlot, ColumnDataSource
from bokeh.plotting import Figure
#from bokeh.layouts import gridplot
N = 100
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 3
    colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))]
source = ColumnDataSource(data=dict(x=x,y=y,radius=radii))
def makeplot():
p = Figure(plot_width=800, plot_height=200,tools="hover,pan",title=None)
p.scatter(x, y, radius=radii,
fill_color=colors, fill_alpha=0.6,
line_color='gray', source=source)
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("radius", "@radius")])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
return p
p1 = makeplot()
p2 = makeplot()
p3 = makeplot()
p = GridPlot(children=[[p1],[p2],[p3]])
#js,html = embedPlot(p)
script, div = embedPlot(p)
return dict(div=div,script=script)
@auth.requires_login()
def admin():
"""Settings"""
parser,conffile = getConfig()
options = dict(parser.items('base'))
form = SQLFORM.dictform(options)
if form.process().accepted:
for i in dict(parser.items('base')):
parser.set('base', i, form.vars[i])
parser.write(open(conffile,'w'))
response.flash='Saved'
redirect(URL('default','admin'))
return dict(form=form)
def about():
msg = 'About this page'
#fp = os.path.join(request.folder,'static/docs','about.txt')
return dict(msg=msg)
def citation():
return dict()
def help():
msg = T('')
return dict(msg=msg)
| dmnfarrell/epitopemap | controllers/default.py | Python | apache-2.0 | 49,804 |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns.constants import SERVICE_ALARM_GENERATOR, \
ServicesDefaultConfigurationFiles
class CfgParser(object):
def __init__(self, argv):
self._devices = []
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
'''
command line example
contrail-alarm-gen --log_level SYS_DEBUG
--logging_level DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--worker_id 0
--partitions 5
--redis_password
--http_server_port 5995
--redis_server_port 6379
--redis_uve_list 127.0.0.1:6379
--alarmgen_list 127.0.0.1:0
--kafka_broker_list 127.0.0.1:9092
--zk_list 127.0.0.1:2181
--rabbitmq_server_list 127.0.0.1:5672
--conf_file /etc/contrail/contrail-alarm-gen.conf
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file", action="append",
help="Specify config file", metavar="FILE",
default=ServicesDefaultConfigurationFiles.get(
SERVICE_ALARM_GENERATOR, None))
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'host_ip' : '127.0.0.1',
'collectors' : [],
'kafka_broker_list' : ['127.0.0.1:9092'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'http_server_port' : 5995,
'worker_id' : '0',
'partitions' : 15,
'zk_list' : None,
'alarmgen_list' : ['127.0.0.1:0'],
'cluster_id' :'',
}
defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
redis_opts = {
'redis_server_port' : 6379,
'redis_password' : None,
'redis_uve_list' : ['127.0.0.1:6379'],
}
configdb_opts = {
'rabbitmq_server_list': None,
'rabbitmq_port': 5672,
'rabbitmq_user': 'guest',
'rabbitmq_password': 'guest',
'rabbitmq_vhost': None,
'rabbitmq_ha_mode': False,
'rabbitmq_use_ssl': False,
'kombu_ssl_version': '',
'kombu_ssl_keyfile': '',
'kombu_ssl_certfile': '',
'kombu_ssl_ca_certs': '',
'config_db_server_list': None,
'config_db_username': None,
'config_db_password': None
}
sandesh_opts = SandeshConfig.get_default_options()
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items('DEFAULTS')))
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'CONFIGDB' in config.sections():
configdb_opts.update(dict(config.items('CONFIGDB')))
SandeshConfig.update_options(sandesh_opts, config)
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
defaults.update(redis_opts)
defaults.update(configdb_opts)
defaults.update(sandesh_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
help="Host IP address")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--worker_id",
help="Worker Id")
parser.add_argument("--partitions", type=int,
help="Number of partitions for hashing UVE keys")
parser.add_argument("--redis_server_port",
type=int,
help="Redis server port")
parser.add_argument("--redis_password",
help="Redis server password")
parser.add_argument("--kafka_broker_list",
help="List of bootstrap kafka brokers in ip:port format",
nargs="+")
parser.add_argument("--zk_list",
help="List of zookeepers in ip:port format",
nargs="+")
parser.add_argument("--rabbitmq_server_list", type=str,
help="List of Rabbitmq server ip address separated by comma")
parser.add_argument("--rabbitmq_port",
help="Rabbitmq server port")
parser.add_argument("--rabbitmq_user",
help="Username for Rabbitmq")
parser.add_argument("--rabbitmq_password",
help="Password for Rabbitmq")
parser.add_argument("--rabbitmq_vhost",
help="vhost for Rabbitmq")
parser.add_argument("--rabbitmq_ha_mode",
action="store_true",
help="True if the rabbitmq cluster is mirroring all queue")
parser.add_argument("--config_db_server_list",
help="List of cassandra servers in ip:port format",
nargs='+')
parser.add_argument("--config_db_username",
help="Cassandra user name")
parser.add_argument("--config_db_password",
help="Cassandra password")
parser.add_argument("--redis_uve_list",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument("--alarmgen_list",
help="List of alarmgens in ip:inst format. For internal use only",
nargs="+")
parser.add_argument("--cluster_id",
help="Analytics Cluster Id")
SandeshConfig.add_parser_arguments(parser)
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.kafka_broker_list) is str:
self._args.kafka_broker_list= self._args.kafka_broker_list.split()
if type(self._args.zk_list) is str:
self._args.zk_list= self._args.zk_list.split()
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
if type(self._args.alarmgen_list) is str:
self._args.alarmgen_list = self._args.alarmgen_list.split()
if type(self._args.config_db_server_list) is str:
self._args.config_db_server_list = \
self._args.config_db_server_list.split()
self._args.conf_file = args.conf_file
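    # helpers below split comma- and/or whitespace-separated option strings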
def _pat(self):
if self.__pat is None:
self.__pat = re.compile(', *| +')
return self.__pat
def _mklist(self, s):
return self._pat().split(s)
def redis_uve_list(self):
return self._args.redis_uve_list
def alarmgen_list(self):
return self._args.alarmgen_list
def collectors(self):
return self._args.collectors
def kafka_broker_list(self):
return self._args.kafka_broker_list
def zk_list(self):
        return self._args.zk_list
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def http_port(self):
return self._args.http_server_port
def worker_id(self):
return self._args.worker_id
def partitions(self):
return self._args.partitions
def redis_password(self):
return self._args.redis_password
def redis_server_port(self):
return self._args.redis_server_port
def host_ip(self):
return self._args.host_ip
def kafka_prefix(self):
return self._args.cluster_id
def rabbitmq_params(self):
return {'servers': self._args.rabbitmq_server_list,
'port': self._args.rabbitmq_port,
'user': self._args.rabbitmq_user,
'password': self._args.rabbitmq_password,
'vhost': self._args.rabbitmq_vhost,
'ha_mode': self._args.rabbitmq_ha_mode,
'use_ssl': self._args.rabbitmq_use_ssl,
'ssl_version': self._args.kombu_ssl_version,
'ssl_keyfile': self._args.kombu_ssl_keyfile,
'ssl_certfile': self._args.kombu_ssl_certfile,
'ssl_ca_certs': self._args.kombu_ssl_ca_certs}
def cassandra_params(self):
return {'servers': self._args.config_db_server_list,
'user': self._args.config_db_username,
'password': self._args.config_db_password,
'cluster_id': self._args.cluster_id}
# end cassandra_params
def sandesh_config(self):
return SandeshConfig.from_parser_arguments(self._args)
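# Minimal usage sketch (illustrative, not part of the service): parse an
# explicit argv string instead of the process arguments; the option values
# below are made up.
if __name__ == '__main__':
    cfg = CfgParser('--http_server_port 5995 --partitions 10')
    cfg.parse()
    print cfg.http_port(), cfg.partitions(), cfg.kafka_broker_list()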
| nischalsheth/contrail-controller | src/opserver/alarmgen_cfg.py | Python | apache-2.0 | 11,029 |
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from logcollector.openstack.common import excutils
from logcollector.openstack.common.gettextutils import _
from logcollector.openstack.common import importutils
from logcollector.openstack.common import jsonutils
from logcollector.openstack.common import log as logging
from logcollector.openstack.common.rpc import amqp as rpc_amqp
from logcollector.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    # call sites invoke this without arguments, so default to the global conf
    conf = conf or cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
elif conf.qpid_topology_version == 2:
addr_opts = {
"link": {
"x-declare": {
"auto-delete": True,
"exclusive": False,
},
},
}
else:
raise_invalid_topology_version()
addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
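    # Example (illustrative): with qpid_topology_version = 1, a topic
    # consumer for exchange "nova" and topic "scheduler" gets an address like
    #   nova/scheduler ; {"create": "always", "node": {...}, "link": {...}}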
def connect(self, session):
"""Declare the receiver on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = None
else:
raise_invalid_topology_version()
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
link_opts = {"exclusive": True}
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"durable": False, "type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version()
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
raise_invalid_topology_version()
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
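    # Example (illustrative): a payload such as {'data': 'x' * 70000} cannot
    # be encoded as 'amqp/map', so send() falls back to _pack_json_msg(),
    # which ships the content as a JSON string with content_type set to
    # JSON_CONTENT_TYPE; consumers reverse this in _unpack_json_msg().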
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
node_name = msg_id
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
else:
raise_invalid_topology_version()
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""Init a 'fanout' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version()
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
node_opts = {"durable": True}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in six.itervalues(consumers):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
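    # Usage sketch for ensure() (hypothetical error callback; this mirrors
    # how declare_consumer() and publisher_send() below wrap their work):
    #
    #   def _on_error(exc):
    #       LOG.error("operation failed: %s", exc)
    #   self.ensure(_on_error, self.session.sender, "amq.topic/topic/x/y")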
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
                return  # end the generator cleanly (avoids PEP 479 issues)
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
six.next(it)
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
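    # Usage sketch for join_consumer_pool() (hypothetical names): every
    # worker that calls
    #     conn.join_consumer_pool(callback=handle, pool_name='workers',
    #                             topic='notifications.info')
    # shares a single queue, so each message is handled by exactly one
    # member of the 'workers' pool.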
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
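# Minimal client-side sketch of the module-level API above (hypothetical
# conf, context and topic; the msg dict follows the usual OpenStack RPC
# {'method': ..., 'args': ...} convention):
#
#   result = call(conf, context, 'compute',
#                 {'method': 'ping', 'args': {}}, timeout=30)  # one reply
#   cast(conf, context, 'compute',
#        {'method': 'ping', 'args': {}})                       # fire-and-forget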
| redhat-cip/openstack-logcollector | openstack-logcollector/openstack/common/rpc/impl_qpid.py | Python | apache-2.0 | 29,688 |
##
# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
"""
Test memcacheprops.
"""
import os
from txweb2.http import HTTPError
from txdav.xml.base import encodeXMLName
from twistedcaldav.memcacheprops import MemcachePropertyCollection
from twistedcaldav.test.util import InMemoryPropertyStore
from twistedcaldav.test.util import TestCase
class StubCollection(object):
def __init__(self, path, childNames):
self.path = path
self.fp = StubFP(path)
self.children = {}
for childName in childNames:
self.children[childName] = StubResource(self, path, childName)
def listChildren(self):
return self.children.iterkeys()
def getChild(self, childName):
return self.children[childName]
def propertyCollection(self):
if not hasattr(self, "_propertyCollection"):
self._propertyCollection = MemcachePropertyCollection(self)
return self._propertyCollection
class StubResource(object):
def __init__(self, parent, path, name):
self.parent = parent
self.fp = StubFP(os.path.join(path, name))
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
self._dead_properties = self.parent.propertyCollection().propertyStoreForChild(self, InMemoryPropertyStore())
return self._dead_properties
class StubFP(object):
def __init__(self, path):
self.path = path
def child(self, childName):
class _Child(object):
def __init__(self, path):
self.path = path
return _Child(os.path.join(self.path, childName))
def basename(self):
return os.path.basename(self.path)
class StubProperty(object):
def __init__(self, ns, name, value=None):
self.ns = ns
self.name = name
self.value = value
def qname(self):
return self.ns, self.name
def __repr__(self):
return "%s = %s" % (encodeXMLName(self.ns, self.name), self.value)
class MemcachePropertyCollectionTestCase(TestCase):
"""
    Test MemcachePropertyCollection.
"""
def getColl(self):
return StubCollection("calendars", ["a", "b", "c"])
def test_setget(self):
child1 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
child2 = self.getColl().getChild("a")
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
"val1")
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val2"))
# force memcache to be consulted (once per collection per request)
child1 = self.getColl().getChild("a")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop1")).value,
"val2")
def test_merge(self):
child1 = self.getColl().getChild("a")
child2 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0"))
child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0"))
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0"))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
"val0")
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val3"))
# force memcache to be consulted (once per collection per request)
child2 = self.getColl().getChild("a")
# verify properties
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
"val1")
self.assertEquals(child2.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child2.deadProperties().get(("ns1:", "prop3")).value,
"val3")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop1")).value,
"val1")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
"val3")
def test_delete(self):
child1 = self.getColl().getChild("a")
child2 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0"))
child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0"))
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0"))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
"val0")
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
child1.deadProperties().delete(("ns1:", "prop1"))
self.assertRaises(HTTPError, child1.deadProperties().get, ("ns1:", "prop1"))
self.assertFalse(child1.deadProperties().contains(("ns1:", "prop1")))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
"val0")
# force memcache to be consulted (once per collection per request)
child2 = self.getColl().getChild("a")
# verify properties
self.assertFalse(child2.deadProperties().contains(("ns1:", "prop1")))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop2")).value,
"val0")
self.assertEquals(child2.deadProperties().get(("ns1:", "prop3")).value,
"val0")
def test_setget_uids(self):
for uid in (None, "123", "456"):
child1 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
child2 = self.getColl().getChild("a")
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val1%s" % (uid if uid else "",))
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val2%s" % (uid if uid else "",)), uid=uid)
# force memcache to be consulted (once per collection per request)
child1 = self.getColl().getChild("a")
self.assertEquals(child1.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val2%s" % (uid if uid else "",))
def test_merge_uids(self):
for uid in (None, "123", "456"):
child1 = self.getColl().getChild("a")
child2 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0%s" % (uid if uid else "",)), uid=uid)
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val0%s" % (uid if uid else "",))
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val3%s" % (uid if uid else "",)), uid=uid)
# force memcache to be consulted (once per collection per request)
child2 = self.getColl().getChild("a")
# verify properties
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val1%s" % (uid if uid else "",))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val3%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val1%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val3%s" % (uid if uid else "",))
def test_delete_uids(self):
for uid in (None, "123", "456"):
child1 = self.getColl().getChild("a")
child2 = self.getColl().getChild("a")
child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0%s" % (uid if uid else "",)), uid=uid)
self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val0%s" % (uid if uid else "",))
child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
child1.deadProperties().delete(("ns1:", "prop1"), uid=uid)
self.assertRaises(HTTPError, child1.deadProperties().get, ("ns1:", "prop1"), uid=uid)
self.assertFalse(child1.deadProperties().contains(("ns1:", "prop1"), uid=uid))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val0%s" % (uid if uid else "",))
# force memcache to be consulted (once per collection per request)
child2 = self.getColl().getChild("a")
# verify properties
self.assertFalse(child2.deadProperties().contains(("ns1:", "prop1"), uid=uid))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
"val0%s" % (uid if uid else "",))
self.assertEquals(child2.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
"val0%s" % (uid if uid else "",))
def _stub_set_multi(self, values, time=None):
self.callCount += 1
for key, value in values.iteritems():
self.results[key] = value
def test_splitSetMulti(self):
self.callCount = 0
self.results = {}
mpc = MemcachePropertyCollection(None)
values = {}
for i in xrange(600):
values["key%d" % (i,)] = "value%d" % (i,)
mpc._split_set_multi(values, self._stub_set_multi)
self.assertEquals(self.callCount, 3)
self.assertEquals(self.results, values)
def test_splitSetMultiWithChunksize(self):
self.callCount = 0
self.results = {}
mpc = MemcachePropertyCollection(None)
values = {}
for i in xrange(13):
values["key%d" % (i,)] = "value%d" % (i,)
mpc._split_set_multi(values, self._stub_set_multi, chunksize=3)
self.assertEquals(self.callCount, 5)
self.assertEquals(self.results, values)
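    # Chunk math for the two tests above: 13 items with chunksize=3 need
    # ceil(13 / 3) == 5 calls, and 600 items with the collection's default
    # resolve to 3 calls, which puts the default chunk size somewhere in
    # the 200-299 range (inferred from the assertions, not asserted
    # directly).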
def _stub_gets_multi(self, keys):
self.callCount += 1
result = {}
for key in keys:
result[key] = self.expected[key]
return result
def test_splitGetsMulti(self):
self.callCount = 0
self.expected = {}
keys = []
for i in xrange(600):
keys.append("key%d" % (i,))
self.expected["key%d" % (i,)] = "value%d" % (i,)
mpc = MemcachePropertyCollection(None)
result = mpc._split_gets_multi(keys, self._stub_gets_multi)
self.assertEquals(self.callCount, 3)
self.assertEquals(self.expected, result)
def test_splitGetsMultiWithChunksize(self):
self.callCount = 0
self.expected = {}
keys = []
for i in xrange(600):
keys.append("key%d" % (i,))
self.expected["key%d" % (i,)] = "value%d" % (i,)
mpc = MemcachePropertyCollection(None)
result = mpc._split_gets_multi(keys, self._stub_gets_multi, chunksize=12)
self.assertEquals(self.callCount, 50)
self.assertEquals(self.expected, result)
| trevor/calendarserver | twistedcaldav/test/test_memcacheprops.py | Python | apache-2.0 | 14,385 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.address import AddressHelper
class Application(object):
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == 'chrome':
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError(f"Unrecognized browser {browser}")
self.wd.implicitly_wait(3)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.address = AddressHelper(self)
self.base_url = base_url
self.open_home_page()
def open_home_page(self):
# open homepage
wd = self.wd
        # find_elements (plural) returns an empty list instead of raising
        # NoSuchElementException when the search box is absent
        if not (wd.current_url.endswith("/addressbook/") and wd.find_elements_by_name("searchstring")):
wd.get(self.base_url)
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def destroy(self):
self.wd.quit()
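# Usage sketch (hypothetical base URL; the helper objects are wired up in
# __init__ above):
#
#   app = Application("firefox", "http://localhost/addressbook/")
#   ... drive app.session / app.group / app.address ...
#   app.destroy()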
| vpalex999/python_training | fixture/application.py | Python | apache-2.0 | 1,165 |
#!/usr/bin/python
from __future__ import absolute_import, print_function
import argparse
import csv
import os
import re
import sys
try:
from plistlib import load as load_plist # Python 3
from plistlib import dump as dump_plist
except ImportError:
from plistlib import readPlist as load_plist # Python 2
from plistlib import writePlist as dump_plist
def getOptionsString(optionList):
    # optionList should be a list of "Option=Value" strings; split on the
    # first '=' only so values may themselves contain '='
    return ', '.join('"%s":"%s"' % tuple(option.split('=', 1))
                     for option in optionList)
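# Usage sketch (hypothetical options):
#   getOptionsString(['Duplex=DuplexNoTumble', 'PageSize=A4'])
#   returns '"Duplex":"DuplexNoTumble", "PageSize":"A4"'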
parser = argparse.ArgumentParser(description='Generate a Munki nopkg-style pkginfo for printer installation.')
parser.add_argument('--printername', help='Name of printer queue. May not contain spaces, tabs, # or /. Required.')
parser.add_argument('--driver', help='Name of driver file in /Library/Printers/PPDs/Contents/Resources/. Can be relative or full path. Required.')
parser.add_argument('--address', help='IP or DNS address of printer. If no protocol is specified, defaults to lpd://. Required.')
parser.add_argument('--location', help='Location name for printer. Optional. Defaults to printername.')
parser.add_argument('--displayname', help='Display name for printer (and Munki pkginfo). Optional. Defaults to printername.')
parser.add_argument('--desc', help='Description for Munki pkginfo only. Optional.')
parser.add_argument('--requires', help='Required packages in form of space-delimited \'CanonDriver1 CanonDriver2\'. Optional.')
parser.add_argument('--options', nargs='*', dest='options', help='Printer options in form of space-delimited \'Option1=Key Option2=Key Option3=Key\', etc. Optional.')
parser.add_argument('--version', help='Version number of Munki pkginfo. Optional. Defaults to 1.0.', default='1.0')
parser.add_argument('--icon', help='Specifies an existing icon in the Munki repo to display for the printer in Managed Software Center. Optional.')
parser.add_argument('--csv', help='Path to CSV file containing printer info. If CSV is provided, all other options are ignored.')
args = parser.parse_args()
pwd = os.path.dirname(os.path.realpath(__file__))
f = open(os.path.join(pwd, 'AddPrinter-Template.plist'), 'rb')
templatePlist = load_plist(f)
f.close()
if args.csv:
# A CSV was found, use that for all data.
with open(args.csv, mode='r') as infile:
reader = csv.reader(infile)
next(reader, None) # skip the header row
for row in reader:
newPlist = dict(templatePlist)
# each row contains 10 elements:
# Printer name, location, display name, address, driver, description, options, version, requires, icon
# options in the form of "Option=Value Option2=Value Option3=Value"
            # requires in the form of "package1 package2" (note: space-separated)
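            # e.g. a hypothetical row:
            # Lab_Printer,Room 101,Lab Printer,10.0.0.5,HP_LaserJet.ppd.gz,Front office laser,Duplex=DuplexNoTumble,1.0,HPDrivers,printer.png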
theOptionString = ''
if row[6] != "":
theOptionString = getOptionsString(row[6].split(" "))
# First, change the plist keys in the pkginfo itself
newPlist['display_name'] = row[2]
newPlist['description'] = row[5]
newPlist['name'] = "AddPrinter_" + str(row[0]) # set to printer name
# Check for an icon
if row[9] != "":
newPlist['icon_name'] = row[9]
# Check for a version number
if row[7] != "":
# Assume the user specified a version number
version = row[7]
else:
# Use the default version of 1.0
version = "1.0"
newPlist['version'] = version
# Check for a protocol listed in the address
if '://' in row[3]:
# Assume the user passed in a full address and protocol
address = row[3]
else:
# Assume the user wants to use the default, lpd://
address = 'lpd://' + row[3]
# Append the driver path to the driver file specified in the csv
driver = '/Library/Printers/PPDs/Contents/Resources/%s' % row[4]
base_driver = row[4]
if row[4].endswith('.gz'):
base_driver = row[4].replace('.gz', '')
if base_driver.endswith('.ppd'):
base_driver = base_driver.replace('.ppd', '')
# Now change the variables in the installcheck_script
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("PRINTERNAME", row[0])
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("OPTIONS", theOptionString)
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("LOCATION", row[1].replace('"', ''))
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DISPLAY_NAME", row[2].replace('"', ''))
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("ADDRESS", address)
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DRIVER", base_driver)
# Now change the variables in the postinstall_script
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("PRINTERNAME", row[0])
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("LOCATION", row[1].replace('"', ''))
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DISPLAY_NAME", row[2].replace('"', ''))
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("ADDRESS", address)
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DRIVER", driver)
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("OPTIONS", theOptionString)
# Now change the one variable in the uninstall_script
newPlist['uninstall_script'] = newPlist['uninstall_script'].replace("PRINTERNAME", row[0])
# Add required packages if passed in the csv
if row[8] != "":
newPlist['requires'] = row[8].split(' ')
# Write out the file
newFileName = "AddPrinter-" + row[0] + "-" + version + ".pkginfo"
f = open(newFileName, 'wb')
dump_plist(newPlist, f)
f.close()
else:
if not args.printername:
print(os.path.basename(sys.argv[0]) + ': error: argument --printername is required', file=sys.stderr)
parser.print_usage()
sys.exit(1)
if not args.driver:
print(os.path.basename(sys.argv[0]) + ': error: argument --driver is required', file=sys.stderr)
parser.print_usage()
sys.exit(1)
if not args.address:
print(os.path.basename(sys.argv[0]) + ': error: argument --address is required', file=sys.stderr)
parser.print_usage()
sys.exit(1)
if re.search(r"[\s#/]", args.printername):
# printernames can't contain spaces, tabs, # or /. See lpadmin manpage for details.
print("ERROR: Printernames can't contain spaces, tabs, # or /.", file=sys.stderr)
sys.exit(1)
if args.desc:
description = args.desc
else:
description = ""
if args.displayname:
displayName = args.displayname
else:
displayName = str(args.printername)
if args.location:
location = args.location
else:
location = args.printername
if args.version:
version = str(args.version)
else:
version = "1.0"
if args.requires:
requires = args.requires
else:
requires = ""
if args.icon:
icon = args.icon
else:
icon = ""
if args.options:
optionsString = str(args.options[0]).split(' ')
optionsString = getOptionsString(optionsString)
else:
optionsString = ''
if args.driver.startswith('/Library'):
# Assume the user passed in a full path rather than a relative filename
driver = args.driver
else:
# Assume only a relative filename
driver = os.path.join('/Library/Printers/PPDs/Contents/Resources', args.driver)
if '://' in args.address:
# Assume the user passed in a full address and protocol
address = args.address
else:
# Assume the user wants to use the default, lpd://
address = 'lpd://' + args.address
newPlist = dict(templatePlist)
# root pkginfo variable replacement
newPlist['description'] = description
newPlist['display_name'] = displayName
newPlist['name'] = "AddPrinter_" + displayName.replace(" ", "")
newPlist['version'] = version
newPlist['icon_name'] = icon
# installcheck_script variable replacement
newPlist['installcheck_script'] = templatePlist['installcheck_script'].replace("PRINTERNAME", args.printername)
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("ADDRESS", address)
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DISPLAY_NAME", displayName)
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("LOCATION", location.replace('"', ''))
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DRIVER", os.path.splitext(os.path.basename(driver))[0].replace('"', ''))
newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("OPTIONS", optionsString)
# postinstall_script variable replacement
newPlist['postinstall_script'] = templatePlist['postinstall_script'].replace("PRINTERNAME", args.printername)
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("ADDRESS", address)
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DISPLAY_NAME", displayName)
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("LOCATION", location.replace('"', ''))
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DRIVER", driver.replace('"', ''))
newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("OPTIONS", optionsString)
# uninstall_script variable replacement
newPlist['uninstall_script'] = templatePlist['uninstall_script'].replace("PRINTERNAME", args.printername)
# required packages
if requires != "":
newPlist['requires'] = [r.replace('\\', '') for r in re.split(r"(?<!\\)\s", requires)]
newFileName = "AddPrinter-" + str(args.printername) + "-%s.pkginfo" % str(version)
f = open(newFileName, 'wb')
dump_plist(newPlist, f)
f.close()
| nmcspadden/PrinterGenerator | print_generator.py | Python | apache-2.0 | 10,809 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import cloudfiles as swift_client
from django import http
from django import test as django_test
from django.conf import settings
from django.contrib.messages.storage import default_storage
from django.core.handlers import wsgi
from django.test.client import RequestFactory
from functools import wraps
from glanceclient.v1 import client as glance_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
import quantumclient as quantum_client
import httplib2
import mox
from horizon import api
from horizon import context_processors
from horizon import middleware
from horizon import users
from horizon.tests.test_data.utils import load_test_data
from .time import time
from .time import today
from .time import utcnow
# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
def create_stubs(stubs_to_create={}):
if not isinstance(stubs_to_create, dict):
raise TypeError, ("create_stub must be passed a dict, but a %s was " \
"given." % type(stubs_to_create).__name__)
def inner_stub_out(fn):
@wraps(fn)
def instance_stub_out(self):
for key in stubs_to_create:
if not (isinstance(stubs_to_create[key], tuple) or \
isinstance(stubs_to_create[key], list)):
raise TypeError, ("The values of the create_stub " \
"dict must be lists or tuples, but is a %s." %
type(stubs_to_create[key]).__name__)
for value in stubs_to_create[key]:
self.mox.StubOutWithMock(key, value)
return fn(self)
return instance_stub_out
return inner_stub_out
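# Usage sketch for create_stubs (hypothetical stub targets; keys are the
# objects to stub out, values are tuples of attribute names):
#
#   @create_stubs({api.nova: ('server_list', 'flavor_list')})
#   def test_index(self):
#       api.nova.server_list(IsA(http.HttpRequest)) \
#           .AndReturn(self.servers.list())
#       ...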
class RequestFactoryWithMessages(RequestFactory):
def get(self, *args, **kwargs):
req = super(RequestFactoryWithMessages, self).get(*args, **kwargs)
req.session = []
req._messages = default_storage(req)
return req
def post(self, *args, **kwargs):
req = super(RequestFactoryWithMessages, self).post(*args, **kwargs)
req.session = []
req._messages = default_storage(req)
return req
class TestCase(django_test.TestCase):
"""
Specialized base test case class for Horizon which gives access to
numerous additional features:
* A full suite of test data through various attached objects and
managers (e.g. ``self.servers``, ``self.user``, etc.). See the
docs for :class:`~horizon.tests.test_data.utils.TestData` for more
information.
* The ``mox`` mocking framework via ``self.mox``.
* A set of request context data via ``self.context``.
* A ``RequestFactory`` class which supports Django's ``contrib.messages``
framework via ``self.factory``.
* A ready-to-go request object via ``self.request``.
* The ability to override specific time data controls for easier testing.
* Several handy additional assertion methods.
"""
def setUp(self):
load_test_data(self)
self.mox = mox.Mox()
self.factory = RequestFactoryWithMessages()
self.context = {'authorized_tenants': self.tenants.list()}
def fake_conn_request(*args, **kwargs):
raise Exception("An external URI request tried to escape through "
"an httplib2 client. Args: %s, kwargs: %s"
% (args, kwargs))
self._real_conn_request = httplib2.Http._conn_request
httplib2.Http._conn_request = fake_conn_request
self._real_horizon_context_processor = context_processors.horizon
context_processors.horizon = lambda request: self.context
self._real_get_user_from_request = users.get_user_from_request
tenants = self.context['authorized_tenants']
self.setActiveUser(id=self.user.id,
token=self.token.id,
username=self.user.name,
tenant_id=self.tenant.id,
service_catalog=self.service_catalog,
authorized_tenants=tenants)
self.request = http.HttpRequest()
self.request.session = self.client._session()
self.request.session['token'] = self.token.id
middleware.HorizonMiddleware().process_request(self.request)
def tearDown(self):
self.mox.UnsetStubs()
httplib2.Http._conn_request = self._real_conn_request
context_processors.horizon = self._real_horizon_context_processor
users.get_user_from_request = self._real_get_user_from_request
self.mox.VerifyAll()
def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
service_catalog=None, tenant_name=None, roles=None,
authorized_tenants=None):
users.get_user_from_request = lambda x: \
users.User(id=id,
token=token,
user=username,
tenant_id=tenant_id,
service_catalog=service_catalog,
roles=roles,
authorized_tenants=authorized_tenants,
request=self.request)
def override_times(self):
""" Overrides the "current" time with immutable values. """
now = datetime.datetime.utcnow()
time.override_time = \
datetime.time(now.hour, now.minute, now.second)
today.override_time = datetime.date(now.year, now.month, now.day)
utcnow.override_time = now
return now
def reset_times(self):
""" Undoes the changes made by ``override_times``. """
time.override_time = None
today.override_time = None
utcnow.override_time = None
def assertRedirectsNoFollow(self, response, expected_url):
"""
Asserts that the given response issued a 302 redirect without
processing the view which is redirected to.
"""
        assert (response.status_code // 100 == 3), \
"The response did not return a redirect."
self.assertEqual(response._headers.get('location', None),
('Location', settings.TESTSERVER + expected_url))
self.assertEqual(response.status_code, 302)
def assertNoMessages(self, response=None):
"""
Asserts that no messages have been attached by the ``contrib.messages``
framework.
"""
self.assertMessageCount(response, success=0, warn=0, info=0, error=0)
def assertMessageCount(self, response=None, **kwargs):
"""
Asserts that the specified number of messages have been attached
for various message types. Usage would look like
``self.assertMessageCount(success=1)``.
"""
temp_req = self.client.request(**{'wsgi.input': None})
temp_req.COOKIES = self.client.cookies
storage = default_storage(temp_req)
messages = []
if response is None:
# To gain early access to the messages we have to decode the
# cookie on the test client.
if 'messages' in self.client.cookies:
message_cookie = self.client.cookies['messages'].value
messages = storage._decode(message_cookie)
# Check for messages in the context
elif hasattr(response, "context") and "messages" in response.context:
messages = response.context["messages"]
# Check for messages attached to the request on a TemplateResponse
elif hasattr(response, "_request") and hasattr(response._request,
"_messages"):
messages = response._request._messages._queued_messages
# If we don't have messages and we don't expect messages, we're done.
if not any(kwargs.values()) and not messages:
return
# If we expected messages and have none, that's a problem.
if any(kwargs.values()) and not messages:
error_msg = "Messages were expected, but none were set."
assert 0 == sum(kwargs.values()), error_msg
# Otherwise, make sure we got the expected messages.
for msg_type, count in kwargs.items():
msgs = [m.message for m in messages if msg_type in m.tags]
assert len(msgs) == count, \
"%s messages not as expected: %s" % (msg_type.title(),
", ".join(msgs))
def assertNoFormErrors(self, response, context_name="form"):
"""
        Asserts that the response either does not contain a form in its
        context, or that if it does, that form has no errors.
"""
context = getattr(response, "context", {})
if not context or context_name not in context:
return True
errors = response.context[context_name]._errors
assert len(errors) == 0, \
"Unexpected errors were found on the form: %s" % errors
def assertFormErrors(self, response, count=0, message=None,
context_name="form"):
"""
        Asserts that the response does contain a form in its
        context and that the form has errors. If count is given,
        the number of errors must match it exactly.
"""
context = getattr(response, "context", {})
assert (context and context_name in context), \
"The response did not contain a form."
errors = response.context[context_name]._errors
if count:
assert len(errors) == count, \
"%d errors were found on the form, %d expected" % \
(len(errors), count)
if message and message not in unicode(errors):
self.fail("Expected message not found, instead found: %s"
% ["%s: %s" % (key, [e for e in field_errors]) for
(key, field_errors) in errors.items()])
else:
assert len(errors) > 0, "No errors were found on the form"
class BaseAdminViewTests(TestCase):
"""
A ``TestCase`` subclass which sets an active user with the "admin" role
for testing admin-only views and functionality.
"""
def setActiveUser(self, *args, **kwargs):
if "roles" not in kwargs:
kwargs['roles'] = [self.roles.admin._info]
super(BaseAdminViewTests, self).setActiveUser(*args, **kwargs)
class APITestCase(TestCase):
"""
The ``APITestCase`` class is for use with tests which deal with the
underlying clients rather than stubbing out the horizon.api.* methods.
"""
def setUp(self):
super(APITestCase, self).setUp()
def fake_keystoneclient(request, username=None, password=None,
tenant_id=None, token_id=None, endpoint=None,
admin=False):
"""
Wrapper function which returns the stub keystoneclient. Only
necessary because the function takes too many arguments to
conveniently be a lambda.
"""
return self.stub_keystoneclient()
# Store the original clients
self._original_glanceclient = api.glance.glanceclient
self._original_keystoneclient = api.keystone.keystoneclient
self._original_novaclient = api.nova.novaclient
self._original_quantumclient = api.quantum.quantumclient
# Replace the clients with our stubs.
api.glance.glanceclient = lambda request: self.stub_glanceclient()
api.keystone.keystoneclient = fake_keystoneclient
api.nova.novaclient = lambda request: self.stub_novaclient()
api.quantum.quantumclient = lambda request: self.stub_quantumclient()
def tearDown(self):
super(APITestCase, self).tearDown()
        api.glance.glanceclient = self._original_glanceclient
        api.nova.novaclient = self._original_novaclient
        api.keystone.keystoneclient = self._original_keystoneclient
        # restore the quantum client too; it was saved in setUp as well
        api.quantum.quantumclient = self._original_quantumclient
def stub_novaclient(self):
if not hasattr(self, "novaclient"):
self.mox.StubOutWithMock(nova_client, 'Client')
self.novaclient = self.mox.CreateMock(nova_client.Client)
return self.novaclient
def stub_keystoneclient(self):
if not hasattr(self, "keystoneclient"):
self.mox.StubOutWithMock(keystone_client, 'Client')
self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
return self.keystoneclient
def stub_glanceclient(self):
if not hasattr(self, "glanceclient"):
self.mox.StubOutWithMock(glance_client, 'Client')
self.glanceclient = self.mox.CreateMock(glance_client.Client)
return self.glanceclient
def stub_swiftclient(self, expected_calls=1):
if not hasattr(self, "swiftclient"):
self.mox.StubOutWithMock(swift_client, 'Connection')
self.swiftclient = self.mox.CreateMock(swift_client.Connection)
while expected_calls:
swift_client.Connection(auth=mox.IgnoreArg())\
.AndReturn(self.swiftclient)
expected_calls -= 1
return self.swiftclient
def stub_quantumclient(self):
if not hasattr(self, "quantumclient"):
self.mox.StubOutWithMock(quantum_client, 'Client')
self.quantumclient = self.mox.CreateMock(quantum_client.Client)
return self.quantumclient
| asomya/test | horizon/test.py | Python | apache-2.0 | 14,512 |
#!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utility functions for dealing with VMDKs and datastores
import os
import os.path
import glob
import re
import logging
import fnmatch
from pyVim import vmconfig
from pyVmomi import vim
import pyVim
from pyVim.invt import GetVmFolder, FindChild
from error_code import *
import threadutils
import vmdk_ops
import auth_data_const
import auth
import auth_api
import log_config
from error_code import *
# datastores should not change during 'vmdkops_admin' run,
# so using global to avoid multiple scans of /vmfs/volumes
datastores = None
# we assume files smaller that that to be descriptor files
MAX_DESCR_SIZE = 5000
# regexp for finding "snapshot" (aka delta disk) descriptor names
SNAP_NAME_REGEXP = r"^.*-[0-9]{6}$" # used for names without .vmdk suffix
SNAP_VMDK_REGEXP = r"^.*-[0-9]{6}\.vmdk$" # used for file names
# regexp for finding 'special' vmdk files (they are created by ESXi)
SPECIAL_FILES_REGEXP = r"\A.*-(delta|ctk|digest|flat)\.vmdk$"
# glob expression to match end of 'delta' (aka snapshots) file names.
SNAP_SUFFIX_GLOB = "-[0-9][0-9][0-9][0-9][0-9][0-9].vmdk"
# regexp for finding datastore path "[datastore] path/to/file.vmdk" from full vmdk path
DATASTORE_PATH_REGEXP = r"^/vmfs/volumes/([^/]+)/(.*\.vmdk)$"
# lsof command
LSOF_CMD = "/bin/vmkvsitools lsof"
# Number of times and sleep time to retry on IOError EBUSY
VMDK_RETRY_COUNT = 5
VMDK_RETRY_SLEEP = 1
# root for all the volumes
VOLUME_ROOT = "/vmfs/volumes/"
# For managing resource locks.
lockManager = threadutils.LockManager()
def init_datastoreCache(force=False):
"""
Initializes the datastore cache with the list of datastores accessible
from local ESX host. force=True will force it to ignore current cache
and force init
"""
with lockManager.get_lock("init_datastoreCache"):
global datastores
logging.debug("init_datastoreCache: %s", datastores)
if datastores and not force:
return
si = vmdk_ops.get_si()
# We are connected to ESX so childEntity[0] is current DC/Host
ds_objects = si.content.rootFolder.childEntity[0].datastoreFolder.childEntity
tmp_ds = []
for datastore in ds_objects:
dockvols_path, err = vmdk_ops.get_vol_path(datastore.info.name)
if err:
logging.error(" datastore %s is being ignored as the dockvol path can't be created on it", datastore.info.name)
continue
tmp_ds.append((datastore.info.name,
datastore.info.url,
dockvols_path))
datastores = tmp_ds
def validate_datastore(datastore):
"""
Checks if the datastore is part of datastoreCache.
If not it will update the datastore cache and check if datastore
is a part of the updated cache.
"""
init_datastoreCache()
if datastore in [i[0] for i in datastores]:
return True
else:
init_datastoreCache(force=True)
if datastore in [i[0] for i in datastores]:
return True
return False
def get_datastores():
"""
Returns a list of (name, url, dockvol_path), with an element per datastore
where:
'name' is datastore name (e.g. 'vsanDatastore') ,
'url' is datastore URL (e.g. '/vmfs/volumes/vsan:572904f8c031435f-3513e0db551fcc82')
'dockvol-path; is a full path to 'dockvols' folder on datastore
"""
init_datastoreCache()
return datastores
def get_volumes(tenant_re):
""" Return dicts of docker volumes, their datastore and their paths
"""
# Assume we have two tenants "tenant1" and "tenant2"
# volumes for "tenant1" are in /vmfs/volumes/datastore1/dockervol/tenant1
# volumes for "tenant2" are in /vmfs/volumes/datastore1/dockervol/tenant2
    # volumes that do not belong to any tenant are directly under /vmfs/volumes/datastore1/dockervol
# tenant_re = None : only return volumes which do not belong to a tenant
# tenant_re = "tenant1" : only return volumes which belongs to tenant1
# tenant_re = "tenant*" : return volumes which belong to tenant1 or tenant2
# tenant_re = "*" : return all volumes under /vmfs/volumes/datastore1/dockervol
logging.debug("get_volumes: tenant_pattern(%s)", tenant_re)
volumes = []
for (datastore, url, path) in get_datastores():
logging.debug("get_volumes: %s %s %s", datastore, url, path)
if not tenant_re:
for file_name in list_vmdks(path):
# path : docker_vol path
volumes.append({'path': path,
'filename': file_name,
'datastore': datastore})
else:
for root, dirs, files in os.walk(path):
                # walk through all files under the docker_vol path;
                # root is the directory currently being traversed, e.g.
                #   root = /vmfs/volumes/datastore1/dockervol/tenant1_uuid
                #   path = /vmfs/volumes/datastore1/dockervol
                # so sub_dir is "/tenant1_uuid" and sub_dir_name is
                # "tenant1_uuid"; get_tenant_name() is called with it to find
                # the corresponding tenant_name, which is then matched against
                # the pattern given by tenant_re
logging.debug("get_volumes: path=%s root=%s", path, root)
sub_dir = root.replace(path, "")
sub_dir_name = sub_dir[1:]
# sub_dir_name is the tenant uuid
error_info, tenant_name = auth_api.get_tenant_name(sub_dir_name)
if not error_info:
logging.debug("get_volumes: path=%s root=%s sub_dir_name=%s tenant_name=%s",
path, root, sub_dir_name, tenant_name)
if fnmatch.fnmatch(tenant_name, tenant_re):
for file_name in list_vmdks(root):
volumes.append({'path': root,
'filename': file_name,
'datastore': datastore,
'tenant': tenant_name})
else:
# cannot find this tenant, this tenant was removed
# mark those volumes created by "orphan" tenant
logging.debug("get_volumes: cannot find tenant_name for tenant_uuid=%s", sub_dir_name)
logging.debug("get_volumes: path=%s root=%s sub_dir_name=%s",
path, root, sub_dir_name)
# return orphan volumes only in case when volumes from any tenants are asked
if tenant_re == "*":
for file_name in list_vmdks(root):
volumes.append({'path': root,
'filename': file_name,
'datastore': datastore,
'tenant' : auth_data_const.ORPHAN_TENANT})
logging.debug("volumes %s", volumes)
return volumes
def get_vmdk_path(path, vol_name):
"""
If the volume-related VMDK exists, returns full path to the latest
VMDK disk in the disk chain, be it volume-NNNNNN.vmdk or volume.vmdk.
If the disk does not exists, returns full path to the disk for create().
"""
# Get a delta disk list, and if it's empty - return the full path for volume
# VMDK base file.
# Note: we rely on NEVER allowing '-NNNNNN' in end of a volume name and on
# the fact that ESXi always creates deltadisks as <name>-NNNNNN.vmdk (N is a
# digit, and there are exactly 6 digits there) for delta disks
#
# see vmdk_ops.py:parse_vol_name() which enforces the volume name rules.
delta_disks = glob.glob("{0}/{1}{2}".format(path, vol_name, SNAP_SUFFIX_GLOB))
if not delta_disks:
return os.path.join(path, "{0}.vmdk".format(vol_name))
    # sort the delta disks by creation time and pick the most recent one:
latest = sorted([(vmdk, os.stat(vmdk).st_ctime) for vmdk in delta_disks], key=lambda d: d[1], reverse=True)[0][0]
logging.debug("The latest delta disk is %s. All delta disks: %s", latest, delta_disks)
return latest
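# Example resolution (a sketch with hypothetical files): for
# path='/vmfs/volumes/ds1/dockvols/t1' and vol_name='vol', if
# vol-000001.vmdk and vol-000002.vmdk exist, the most recently created
# delta disk is returned; with no delta disks the result is
# '/vmfs/volumes/ds1/dockvols/t1/vol.vmdk'.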
def get_datastore_path(vmdk_path):
"""Returns a string datastore path "[datastore] path/to/file.vmdk"
from a full vmdk path.
"""
match = re.search(DATASTORE_PATH_REGEXP, vmdk_path)
datastore, path = match.groups()
return "[{0}] {1}".format(datastore, path)
def get_datastore_from_vmdk_path(vmdk_path):
"""Returns a string representing the datastore from a full vmdk path.
"""
match = re.search(DATASTORE_PATH_REGEXP, vmdk_path)
datastore, path = match.groups()
return datastore
def get_volname_from_vmdk_path(vmdk_path):
"""Returns the volume name from a full vmdk path.
"""
match = re.search(DATASTORE_PATH_REGEXP, vmdk_path)
_, path = match.groups()
vmdk = path.split("/")[-1]
return strip_vmdk_extension(vmdk)
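# Example for the three helpers above (hypothetical path):
#   p = '/vmfs/volumes/datastore1/dockvols/tenant1/vol1.vmdk'
#   get_datastore_path(p)           -> '[datastore1] dockvols/tenant1/vol1.vmdk'
#   get_datastore_from_vmdk_path(p) -> 'datastore1'
#   get_volname_from_vmdk_path(p)   -> 'vol1'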
def list_vmdks(path, volname="", show_snapshots=False):
""" Return a list of VMDKs in a given path. Filters out non-descriptor
files and delta disks.
Params:
path - where the VMDKs are looked for
volname - if passed, only files related to this VMDKs will be returned. Useful when
doing volume snapshot inspect
show_snapshots - if set to True, all VMDKs (including delta files) will be returned
"""
# dockvols may not exists on a datastore - this is normal.
if not os.path.exists(path):
return []
logging.debug("list_vmdks: dockvol existed on datastore")
vmdks = [f for f in os.listdir(path) if vmdk_is_a_descriptor(path, f)]
if volname:
vmdks = [f for f in vmdks if f.startswith(volname)]
if not show_snapshots:
expr = re.compile(SNAP_VMDK_REGEXP)
vmdks = [f for f in vmdks if not expr.match(f)]
logging.debug("vmdks %s", vmdks)
return vmdks
def vmdk_is_a_descriptor(path, file_name):
"""
Is the file a vmdk descriptor file? We assume any file that ends in .vmdk,
does not have -delta or -flat or -digest or -ctk at the end of filename,
and has a size less than MAX_DESCR_SIZE is a descriptor file.
"""
name = file_name.lower()
    # filter out all files with the wrong extension,
    # as well as -delta, -flat, -digest and -ctk VMDK files
if not name.endswith('.vmdk') or re.match(SPECIAL_FILES_REGEXP, name):
return False
# Check the size. It's a cheap(ish) way to check for descriptor,
# without actually checking the file content and risking lock conflicts
try:
if os.stat(os.path.join(path, file_name)).st_size > MAX_DESCR_SIZE:
return False
except OSError:
pass # if file does not exist, assume it's small enough
return True
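# Sketch of the descriptor filter above (hypothetical names):
#   'vol.vmdk'        -> descriptor (if under MAX_DESCR_SIZE)
#   'vol-flat.vmdk'   -> rejected by SPECIAL_FILES_REGEXP
#   'vol-000001.vmdk' -> a delta-disk descriptor; list_vmdks() above drops
#                        it via SNAP_VMDK_REGEXP unless show_snapshots=True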
def strip_vmdk_extension(filename):
""" Remove the .vmdk file extension from a string """
return filename.replace(".vmdk", "")
def get_vm_uuid_by_name(vm_name):
""" Returns vm_uuid for given vm_name, or None """
si = vmdk_ops.get_si()
try:
vm = FindChild(GetVmFolder(), vm_name)
return vm.config.uuid
except:
return None
def get_vm_name_by_uuid(vm_uuid):
""" Returns vm_name for given vm_uuid, or None """
si = vmdk_ops.get_si()
try:
return vmdk_ops.vm_uuid2name(vm_uuid)
except:
return None
def get_vm_config_path(vm_name):
"""Returns vm_uuid for given vm_name, or None """
si = vmdk_ops.get_si()
try:
vm = FindChild(GetVmFolder(), vm_name)
config_path = vm.summary.config.vmPathName
except:
return None
# config path has the format like this "[datastore1] test_vm1/test_vm1/test_vm1.vmx"
datastore, path = config_path.split()
datastore = datastore[1:-1]
datastore_path = os.path.join("/vmfs/volumes/", datastore)
# datastore_path has the format like this /vmfs/volumes/datastore_name
vm_config_path = os.path.join(datastore_path, path)
return vm_config_path
def get_attached_volume_path(vm, volname, datastore):
"""
Returns full path for docker volume "volname", residing on "datastore" and attached to "VM"
    Logs a warning and returns None if the volume is not attached
"""
    # Find the attached disk whose backing matches
    # "[datastore] dockvols/<vmgroup>/volname[-NNNNNN]?.vmdk".
    # Since we don't know the vmgroup (the path component after dockvols),
    # we just pick the first match (and warn if there is more than one).
    # Yes, this is redundant - we find the VM and scan its disks here, return
    # the path, and the caller will likely repeat the same steps (find the
    # VM, scan the disks, etc.). It's a corner case, so we live with it.
    # Note that if the VM is moved to a different vmgroup in flight, we may
    # fail here, and that's fine. If a volume with the same name exists in
    # two different vmgroup folders, both are attached, and the VM moves
    # between the groups, we could return the wrong volume; in practice this
    # can't happen, since the admin tool blocks changing a VM's vmgroup
    # while volumes are attached.
if not datastore:
# we rely on datastore always being a part of volume name passed to detach.
# if this contract breaks, or we are called from somewhere else - bail out
logging.error("get_attached_volume_path internal error - empty datastore")
return None
# look for '[datastore] dockvols/tenant/volume.vmdk' name
# and account for delta disks (e.g. volume-000001.vmdk)
    prog = re.compile(r'\[%s\] %s/[^/]+/%s(-[0-9]{6})?\.vmdk$' %
(datastore, vmdk_ops.DOCK_VOLS_DIR, volname))
attached = [d for d in vm.config.hardware.device \
if isinstance(d, vim.VirtualDisk) and \
isinstance(d.backing, vim.VirtualDisk.FlatVer2BackingInfo) and \
prog.match(d.backing.fileName)]
if len(attached) == 0:
logging.error("Can't find device attached to '%s' for volume '%s' on [%s].",
vm.config.name, volname, datastore)
return None
if len(attached) > 1:
logging.warning("More than 1 device attached to '%s' for volume '%s' on [%s].",
vm.config.name, volname, datastore)
    path = find_dvs_volume(attached[0])
    # a found path is the normal outcome, so log at info rather than warning
    logging.info("Found path: %s", path)
return path
def find_dvs_volume(dev):
    """
    If @param dev (a vim.vm.device.VirtualDevice) is a vDVS-managed volume, return its vmdk path;
    otherwise return None
    """
    # if the device is not a virtual disk, skip it (return None for a consistent return type)
    if not isinstance(dev, vim.vm.device.VirtualDisk):
        return None
# Filename format is as follows:
# "[<datastore name>] <parent-directory>/tenant/<vmdk-descriptor-name>"
# Trim the datastore name and keep disk path.
datastore_name, disk_path = dev.backing.fileName.rsplit("]", 1)
logging.info("backing disk name is %s", disk_path)
# name formatting to remove unwanted characters
datastore_name = datastore_name[1:]
disk_path = disk_path.lstrip()
# find the dockvols dir on current datastore and resolve symlinks if any
dvol_dir_path = os.path.realpath(os.path.join(VOLUME_ROOT,
datastore_name, vmdk_ops.DOCK_VOLS_DIR))
dvol_dir = os.path.basename(dvol_dir_path)
if disk_path.startswith(dvol_dir):
# returning the vmdk path for vDVS volume
return os.path.join(VOLUME_ROOT, datastore_name, disk_path)
return None
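# Illustrative use (mirrors check_volumes_mounted() below; "vm" is a pyVmomi VM object):
#   for dev in vm.config.hardware.device:
#       path = find_dvs_volume(dev)
#       if path:
#           logging.info("Found vDVS volume backing: %s", path)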
def check_volumes_mounted(vm_list):
    """
    Return error_info if any VM in @param vm_list has a docker volume mounted
    """
for vm_id, _ in vm_list:
vm = vmdk_ops.findVmByUuid(vm_id)
if vm:
for d in vm.config.hardware.device:
if find_dvs_volume(d):
error_info = generate_error_info(ErrorCode.VM_WITH_MOUNTED_VOLUMES,
vm.config.name)
return error_info
else:
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, vm_id)
return error_info
return None
def log_volume_lsof(vol_name):
"""Log volume open file descriptors"""
rc, out = vmdk_ops.RunCommand(LSOF_CMD)
if rc != 0:
logging.error("Error running lsof for %s: %s", vol_name, out)
return
for line in out.splitlines():
        # Make sure we only match the lines pertaining to that volume's files.
if re.search(r".*/vmfs/volumes/.*{0}.*".format(vol_name), line):
cartel, name, ftype, fd, desc = line.split()
msg = "cartel={0}, name={1}, type={2}, fd={3}, desc={4}".format(
cartel, name, ftype, fd, desc)
logging.info("Volume open descriptor: %s", msg)
def get_datastore_objects():
    """ return all datastore objects """
    si = vmdk_ops.get_si()
    # an ESXi host exposes a single (implicit) datacenter, hence childEntity[0]
    return si.content.rootFolder.childEntity[0].datastore
def get_datastore_url(datastore_name):
    """ return datastore url for given datastore name """
    # Return the fixed url for the special datastore name "_VM_DS"
    if datastore_name == auth_data_const.VM_DS:
        return auth_data_const.VM_DS_URL
    # Return the fixed url for the special datastore name "_ALL_DS"
    if datastore_name == auth_data_const.ALL_DS:
        return auth_data_const.ALL_DS_URL
    # validate_datastore refreshes the cache if datastore_name is not in it
    if not validate_datastore(datastore_name):
        return None
    # Query the datastore URL from the VIM API.
    # get_datastores() returns a list of tuples; each tuple has the format
    # (datastore_name, datastore_url, dockvol_path)
    res = [d[1] for d in get_datastores() if d[0] == datastore_name]
    return res[0] if res else None
def get_datastore_name(datastore_url):
    """ return datastore name for given datastore url """
    # Return the fixed name for the special datastore url "_VM_DS_URL"
    if datastore_url == auth_data_const.VM_DS_URL:
        return auth_data_const.VM_DS
    # Return the fixed name for the special datastore url "_ALL_DS_URL"
    if datastore_url == auth_data_const.ALL_DS_URL:
        return auth_data_const.ALL_DS
    # Query the datastore name from the VIM API.
    # get_datastores() returns a list of tuples; each tuple has the format
    # (datastore_name, datastore_url, dockvol_path)
    res = [d[0] for d in get_datastores() if d[1] == datastore_url]
    logging.debug("get_datastore_name: res=%s", res)
    return res[0] if res else None
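# Illustrative round trip (names and urls are hypothetical):
#   get_datastore_url("datastore1")              -> "/vmfs/volumes/5c2f..." (or None if unknown)
#   get_datastore_name("/vmfs/volumes/5c2f...")  -> "datastore1"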
def get_datastore_url_from_config_path(config_path):
    """Returns the datastore url embedded in config_path """
    # the path can be /vmfs/volumes/<datastore_url_name>/...
    # or /vmfs/volumes/<datastore_name>/...
    # so extract the datastore_url_name:
    config_ds_url = os.path.join("/vmfs/volumes/",
                                 os.path.realpath(config_path).split("/")[3])
    logging.debug("get_datastore_url_from_config_path: config_path=%s config_ds_url=%s",
                  config_path, config_ds_url)
    return config_ds_url
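# Example (hypothetical paths): "/vmfs/volumes/datastore1/vm1/vm1.vmx" resolves through the
# /vmfs/volumes name symlink to "/vmfs/volumes/<uuid>/vm1/vm1.vmx", and the function
# returns "/vmfs/volumes/<uuid>".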
def main():
log_config.configure()
if __name__ == "__main__":
main()
| shivanshu21/docker-volume-vsphere | esx_service/utils/vmdk_utils.py | Python | apache-2.0 | 19,851 |
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of block device actions to display in the CLI.
"""
from .._actions import PhysicalActions
PHYSICAL_SUBCMDS = [
(
"list",
dict(
help="List information about blockdevs in the pool",
args=[
(
"pool_name",
dict(action="store", default=None, nargs="?", help="Pool name"),
)
],
func=PhysicalActions.list_devices,
),
)
]
| stratis-storage/stratis-cli | src/stratis_cli/_parser/_physical.py | Python | apache-2.0 | 1,060 |
def write(es,body,index,doc_type):
    """ Index a document; on failure, return the exception instead of raising """
    try:
        res = es.index(index=index, doc_type=doc_type, body=body)
        return res
    except Exception as e:
        return e
def search(es,body,index,doc_type,size=None):
    """ Run a search query; returns the raw response dict, or None on error """
    if size is None:
        size=1000
    try:
        res = es.search(index=index, doc_type=doc_type, body=body, size=size)
        return res
    except Exception:
        return None
def update(es,body,index,doc_type,id):
res = es.update(index=index, id=id, doc_type=doc_type, body=body)
return res
def delete(es,index,doc_type,id):
res = es.delete(index=index,doc_type=doc_type,id=id)
return res
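# Illustrative usage (the doc_type name is hypothetical; 'sweet_security' is the index
# used by consolidate() below):
#   res = search(es, {"query": {"match_all": {}}}, 'sweet_security', 'device', size=50)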
def compare(d1,d2):
    """ Return {key: (d1[key], d2[key])} for keys present in both dicts with differing values """
    d1_keys = set(d1.keys())
    d2_keys = set(d2.keys())
    intersect_keys = d1_keys.intersection(d2_keys)
    compared = {o : (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
    return compared
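# Example:
#   compare({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) -> {'b': (2, 3)}
# Keys present in only one of the dicts are ignored.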
def consolidate(mac,es,type):
    """ Merge duplicate documents for the given mac: keep the oldest record, delete the rest """
    device1={}
    deviceQuery = {"query": {"match_phrase": {"mac": { "query": mac }}}}
    deviceInfo=search(es, deviceQuery, 'sweet_security', type)
    if deviceInfo is None:
        return
    for device in deviceInfo['hits']['hits']:
        if len(device1) > 0:
            modifiedInfo = compare(device1['_source'],device['_source'])
            # usually just two documents; keep the oldest one, since that one has
            # probably been modified
            if modifiedInfo['firstSeen'][0] < modifiedInfo['firstSeen'][1]:
                deleteID=device['_id']
            else:
                deleteID=device1['_id']
            delete(es,'sweet_security',type,deleteID)
        device1=device
| TravisFSmith/SweetSecurity | apache/flask/webapp/es.py | Python | apache-2.0 | 1,377 |