Dataset columns: repo_name (string, length 5 to 92) | path (string, length 4 to 232) | copies (string, 19 classes) | size (string, length 4 to 7) | content (string, length 721 to 1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
repo_name: LLNL/spack | path: var/spack/repos/builtin/packages/precice/package.py | copies: 2 | size: 6339

# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Precice(CMakePackage):
"""preCICE (Precise Code Interaction Coupling Environment) is a
coupling library for partitioned multi-physics simulations.
Partitioned means that preCICE couples existing programs (solvers)
capable of simulating a subpart of the complete physics involved in
a simulation."""
homepage = 'https://www.precice.org'
git = 'https://github.com/precice/precice.git'
url = 'https://github.com/precice/precice/archive/v1.2.0.tar.gz'
maintainers = ['fsimonis', 'MakisH']
version('develop', branch='develop')
version('2.0.2', sha256='72864480f32696e7b6da94fd404ef5cd6586e2e1640613e46b75f1afac8569ed')
version('2.0.1', sha256='e4fe2d2063042761ab325f8c802f88ae088c90862af288ad1a642967d074bd50')
version('2.0.0', sha256='c8979d366f06e35626a8da08a1c589df77ec13972eb524a1ba99a011e245701f')
version('1.6.1', sha256='7d0c54faa2c69e52304f36608d93c408629868f16f3201f663a0f9b2008f0763')
version('1.6.0', sha256='c3b16376fda9eb3449adb6cc3c1e267c3dc792a5d118e37d93a32a59b5a4bc6f')
version('1.5.2', sha256='051e0d7655a91f8681901e5c92812e48f33a5779309e2f104c99f5a687e1a418')
version('1.5.1', sha256='fbe151f1a9accf9154362c70d15254935d4f594d189982c3a99fdb3dd9d9e665')
version('1.5.0', sha256='a2a794becd08717e3049252134ae35692fed71966ed32e22cca796a169c16c3e')
version('1.4.1', sha256='dde4882edde17882340f9f601941d110d5976340bd71af54c6e6ea22ae56f1a5')
version('1.4.0', sha256='3499bfc0941fb9f004d5e32eb63d64f93e17b4057fab3ada1cde40c8311bd466')
version('1.3.0', sha256='610322ba1b03df8e8f7d060d57a6a5afeabd5db4e8c4a638d04ba4060a3aec96')
version('1.2.0', sha256='0784ecd002092949835151b90393beb6e9e7a3e9bd78ffd40d18302d6da4b05b')
# Skip version 1.1.1 entirely; its CMake configuration lacked an install target.
variant('mpi', default=True, description='Enable MPI support')
variant('petsc', default=True, description='Enable PETSc support')
variant('python', default=False, description='Enable Python support')
variant('shared', default=True, description='Build shared libraries')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build', when='@1.4:')
depends_on('[email protected]:')
depends_on('[email protected]:', when='@1.4:')
depends_on('boost@:1.72.99', when='@:2.0.2')
depends_on('[email protected]:')
depends_on('eigen@:3.3.7', type='build', when='@:1.5') # bug in prettyprint
depends_on('libxml2')
depends_on('mpi', when='+mpi')
depends_on('[email protected]:', when='+petsc')
# Python 3 support was added in version 2.0
depends_on('[email protected]:2.8', when='@:1.9+python', type=('build', 'run'))
depends_on('python@3:', when='@2:+python', type=('build', 'run'))
# numpy 1.17+ requires Python 3
depends_on('py-numpy@:1.16', when='@:1.9+python', type=('build', 'run'))
depends_on('[email protected]:', when='@2:+python', type=('build', 'run'))
# We require C++11 compiler support as well as
# library support for time manipulators (N2071, N2072)
conflicts('%gcc@:4')
conflicts('%clang@:3.7')
conflicts('%intel@:14')
conflicts('%pgi@:14')
def cmake_args(self):
"""Populate cmake arguments for precice."""
spec = self.spec
# The xSDK installation policies were implemented after 1.5.2
xsdk_mode = spec.satisfies("@1.6:")
# Select the correct CMake variables by version
mpi_option = "MPI"
if spec.satisfies("@2:"):
mpi_option = "PRECICE_MPICommunication"
petsc_option = "PETSC"
if spec.satisfies("@2:"):
petsc_option = "PRECICE_PETScMapping"
python_option = "PYTHON"
if spec.satisfies("@2:"):
python_option = "PRECICE_PythonActions"
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
cmake_args = [
'-DBUILD_SHARED_LIBS:BOOL=%s' % variant_bool('+shared'),
]
cmake_args.append('-D%s:BOOL=%s' % (mpi_option, variant_bool('+mpi')))
# Boost
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_BOOST=ON')
cmake_args.append('-DBOOST_ROOT=%s' % spec['boost'].prefix)
# Eigen3
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_EIGEN3=ON')
cmake_args.append(
'-DEIGEN3_INCLUDE_DIR=%s' % spec['eigen'].headers.directories[0])
# LibXML2
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_LIBXML2=ON')
libxml2_includes = spec['libxml2'].headers.directories[0]
cmake_args.extend([
'-DLIBXML2_INCLUDE_DIRS=%s' % libxml2_includes,
'-DLIBXML2_LIBRARIES=%s' % spec['libxml2'].libs[0],
])
# PETSc
if '+petsc' in spec:
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PETSC:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % petsc_option)
cmake_args.extend([
'-DPETSC_DIR=%s' % spec['petsc'].prefix,
'-DPETSC_ARCH=.'
])
else:
cmake_args.append('-D%s:BOOL=OFF' % petsc_option)
# Python
if '+python' in spec:
python_library = spec['python'].libs[0]
python_include = spec['python'].headers.directories[0]
numpy_include = join_path(
spec['py-numpy'].prefix,
spec['python'].package.site_packages_dir,
'numpy', 'core', 'include')
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PYTHON:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % python_option)
cmake_args.extend([
'-DPYTHON_INCLUDE_DIR=%s' % python_include,
'-DNumPy_INCLUDE_DIR=%s' % numpy_include,
'-DPYTHON_LIBRARY=%s' % python_library
])
else:
cmake_args.append('-D%s:BOOL=OFF' % python_option)
return cmake_args
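# Illustrative sketch, not part of the upstream Spack package: for a hypothetical
# concretized spec such as "precice@2.0.2 +mpi ~petsc ~python +shared", the
# cmake_args() method above would return roughly
#   ['-DBUILD_SHARED_LIBS:BOOL=ON',
#    '-DPRECICE_MPICommunication:BOOL=ON',
#    '-DTPL_ENABLE_BOOST=ON', '-DBOOST_ROOT=<boost prefix>',
#    '-DTPL_ENABLE_EIGEN3=ON', '-DEIGEN3_INCLUDE_DIR=<eigen include dir>',
#    '-DTPL_ENABLE_LIBXML2=ON', '-DLIBXML2_INCLUDE_DIRS=<libxml2 include dir>',
#    '-DLIBXML2_LIBRARIES=<libxml2 library>',
#    '-DPRECICE_PETScMapping:BOOL=OFF', '-DPRECICE_PythonActions:BOOL=OFF']
# because versions 1.6 and later take the xSDK TPL_ENABLE_* path and versions
# 2.x use the PRECICE_* option names instead of MPI/PETSC/PYTHON.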
license: lgpl-2.1 | hash: -7,378,302,579,113,582,000 | line_mean: 41.26 | line_max: 95 | alpha_frac: 0.62218 | autogenerated: false

repo_name: fdemian/Morpheus | path: api/routes/Alerts.py | copies: 1 | size: 4990

import json
from api.model.sessionHelper import get_session
from api.model.models import Notification
from api.authentication.AuthenticatedHandler import AuthenticatedHandler
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from tornado.gen import coroutine
from api.Utils import authenticated
class AlertsHandler(AuthenticatedHandler):
def data_received(self, chunk):
pass
# GET /alerts
@authenticated
def get(self):
if not self.settings['notifications_enabled']:
response = {'message': "Notifications disabled."}
self.set_status(501, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
session_object = get_session()
session = session_object()
current_user = self.current_user
alerts = session.query(Notification).filter(Notification.user_id == current_user, Notification.read == False)\
.order_by(Notification.id.desc())\
.all()
data = []
for notification in alerts:
json_notification = {
'id': notification.id,
'type': notification.type,
'text': notification.text,
'link': notification.link,
'read': notification.read
}
data.append(json_notification)
response = {"notifications": data}
self.set_status(200, 'Ok ')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
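# Illustrative sketch, not part of the original module: a successful GET /alerts
# request handled above returns a JSON body shaped like
#   {"notifications": [{"id": 1, "type": "<notification type>", "text": "...",
#                       "link": "...", "read": false}]}
# where the field names mirror the Notification columns serialized in the loop;
# the concrete values shown are hypothetical.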
# TODO: change to POST method?
# -- REASON: Successful response returns a body (and shouldn't).
# -- Otherwise the method is the same as PUT.
# PUT /alerts
@authenticated
def put(self):
request = self.request.body.decode("utf-8")
json_request = json.loads(request)
session_object = get_session()
session = session_object()
try:
notification_id = json_request["id"]
notification = session.query(Notification).filter(Notification.id == notification_id).one()
# Modify all the fields.
notification.type = json_request["type"]
notification.text = json_request["text"]
notification.link = json_request["link"]
notification.read = json_request["read"]
session.commit()
status = 200
status_str = 'Ok'
response = {'id': notification_id}
except NoResultFound:
status = 500
status_str = "Error"
response = {'message': 'No notification with the id ' + str(notification_id) + ' found.'}
except MultipleResultsFound:
status = 500
status_str = "Error"
response = {'message': 'More than one notification with the id ' + str(notification_id) + ' was found.'}
self.set_header("Content-Type", "application/jsonp;charset=UTF-8")
self.set_header("Access-Control-Allow-Origin", "*")
self.set_status(status, status_str)
self.write(response)
return
@coroutine
def post(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def delete(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def trace(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def connect(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def options(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def patch(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
@coroutine
def head(self):
response = {"message": "This is not a valid method for this resource."}
self.set_status(405, 'Error')
self.set_header("Access-Control-Allow-Origin", "*")
self.write(response)
return
license: bsd-2-clause | hash: 4,065,950,719,263,779,000 | line_mean: 29.426829 | line_max: 118 | alpha_frac: 0.589379 | autogenerated: false

repo_name: solashirai/edx-platform | path: cms/djangoapps/contentstore/views/course.py | copies: 1 | size: 70092

"""
Views related to operations on course objects
"""
import copy
import json
import logging
import random
import string # pylint: disable=deprecated-module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, Http404
from django.shortcuts import redirect
import django.utils
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods, require_GET
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from .component import (
ADVANCED_COMPONENT_TYPES,
)
from .item import create_xblock_info
from .library import LIBRARIES_ENABLED
from contentstore import utils
from contentstore.course_group_config import (
COHORT_SCHEME,
GroupConfiguration,
GroupConfigurationsValidationError,
RANDOM_SCHEME,
)
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.push_notification import push_notification_enabled
from contentstore.tasks import rerun_course
from contentstore.utils import (
add_instructor,
initialize_permissions,
get_lms_link_for_item,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_usage_url,
reverse_url,
)
from contentstore.views.entrance_exam import (
create_entrance_exam,
delete_entrance_exam,
update_entrance_exam,
)
from course_action_state.managers import CourseActionStateItemNotFoundError
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.djangoapps.credit.api import is_credit_course, get_credit_requirements
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import get_programs
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.lib.courses import course_image_url
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student import auth
from student.auth import has_course_author_access, has_studio_write_access, has_studio_read_access
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff, UserBasedRole
)
from util.date_utils import get_default_time_display
from util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from util.milestones_helpers import (
is_entrance_exams_enabled,
is_prerequisite_courses_enabled,
is_valid_course_key,
set_prerequisite_courses,
)
from util.organizations_helpers import (
add_organization_course,
get_organization_by_short_name,
organizations_enabled,
)
from util.string_utils import _has_non_ascii_characters
from util.course_key_utils import from_string_or_404
from xmodule.contentstore.content import StaticContent
from xmodule.course_module import CourseFields
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
log = logging.getLogger(__name__)
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
'course_info_update_handler', 'course_search_index_handler',
'course_rerun_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'course_notifications_handler',
'textbooks_list_handler', 'textbooks_detail_handler',
'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
"""
An exception that is raised whenever we need to `fall back` to fetching *all* courses
available to a user, rather than using a shorter method (i.e. fetching by group)
"""
pass
def get_course_and_check_access(course_key, user, depth=0):
"""
Internal method used to calculate and return the locator and course module
for the view functions in this file.
"""
if not has_studio_read_access(user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key, depth=depth)
return course_module
def reindex_course_and_check_access(course_key, user):
"""
Internal method used to restart indexing on a course.
"""
if not has_course_author_access(user, course_key):
raise PermissionDenied()
return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
"""
Handle incoming requests for notifications in a RESTful way.
course_key_string and action_state_id must both be set; else an HttpResponseBadRequest is returned.
For each of these operations, the requesting user must have access to the course;
else a PermissionDenied error is returned.
GET
json: return json representing information about the notification (action, state, etc)
DELETE
json: return json representing success or failure of dismissal/deletion of the notification
PUT
Raises a NotImplementedError.
POST
Raises a NotImplementedError.
"""
# ensure that we have a course and an action state
if not course_key_string or not action_state_id:
return HttpResponseBadRequest()
response_format = request.GET.get('format') or request.POST.get('format') or 'html'
course_key = CourseKey.from_string(course_key_string)
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if not has_studio_write_access(request.user, course_key):
raise PermissionDenied()
if request.method == 'GET':
return _course_notifications_json_get(action_state_id)
elif request.method == 'DELETE':
# we assume any delete requests dismiss actions from the UI
return _dismiss_notification(request, action_state_id)
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'POST':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
else:
return HttpResponseNotFound()
def _course_notifications_json_get(course_action_state_id):
"""
Return the action and the action state for the given id
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
return HttpResponseBadRequest()
action_state_info = {
'action': action_state.action,
'state': action_state.state,
'should_display': action_state.should_display
}
return JsonResponse(action_state_info)
def _dismiss_notification(request, course_action_state_id): # pylint: disable=unused-argument
"""
Update the display of the course notification
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
# Can't dismiss a notification that doesn't exist in the first place
return HttpResponseBadRequest()
if action_state.state == CourseRerunUIStateManager.State.FAILED:
# We remove all permissions for this course key at this time, since
# no further access is required to a course that failed to be created.
remove_all_instructors(action_state.course_key)
# The CourseRerunState is no longer needed by the UI; delete
action_state.delete()
return JsonResponse({'success': True})
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
"""
The restful handler for course specific requests.
It provides the course tree with the necessary information for identifying and labeling the parts. The root
will typically be a 'course' object, but may not be, especially as we support modules.
GET
html: return course listing page if not given a course id
html: return html page overview for the given course if given a course id
json: return json representing the course branch's index entry as well as dag w/ all of the children
replaced w/ json docs where each doc has {'_id': , 'display_name': , 'children': }
POST
json: create a course, return resulting json
descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
index entry.
PUT
json: update this course (index entry not xblock) such as repointing head, changing display name, org,
course, run. Return same json as above.
DELETE
json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
"""
try:
response_format = request.GET.get('format') or request.POST.get('format') or 'html'
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
return JsonResponse(_course_outline_json(request, course_module))
elif request.method == 'POST': # not sure if this is only post. If one will have ids, it goes after access
return _create_or_rerun_course(request)
elif not has_studio_write_access(request.user, CourseKey.from_string(course_key_string)):
raise PermissionDenied()
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'DELETE':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
elif request.method == 'GET': # assume html
if course_key_string is None:
return redirect(reverse("home"))
else:
return course_index(request, CourseKey.from_string(course_key_string))
else:
return HttpResponseNotFound()
except InvalidKeyError:
raise Http404
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_rerun_handler(request, course_key_string):
"""
The restful handler for course reruns.
GET
html: return html page with form to rerun a course for the given course id
"""
# Only global staff (PMs) are able to rerun courses during the soft launch
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=3)
if request.method == 'GET':
return render_to_response('course-create-rerun.html', {
'source_course_key': course_key,
'display_name': course_module.display_name,
'user': request.user,
'course_creator_status': _get_course_creator_status(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)
})
@login_required
@ensure_csrf_cookie
@require_GET
def course_search_index_handler(request, course_key_string):
"""
The restful handler for course indexing.
GET
html: return status of indexing task
json: return status of indexing task
"""
# Only global staff (PMs) are able to index courses
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
content_type = request.META.get('CONTENT_TYPE', None)
if content_type is None:
content_type = "application/json; charset=utf-8"
with modulestore().bulk_operations(course_key):
try:
reindex_course_and_check_access(course_key, request.user)
except SearchIndexingError as search_err:
return HttpResponse(dump_js_escaped_json({
"user_message": search_err.error_list
}), content_type=content_type, status=500)
return HttpResponse(dump_js_escaped_json({
"user_message": _("Course has been successfully reindexed.")
}), content_type=content_type, status=200)
def _course_outline_json(request, course_module):
"""
Returns a JSON representation of the course module and recursively all of its children.
"""
return create_xblock_info(
course_module,
include_child_info=True,
course_outline=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical',
user=request.user
)
def get_in_process_course_actions(request):
"""
Get all in-process course actions
"""
return [
course for course in
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED}, should_display=True
)
if has_studio_read_access(request.user, course.course_key)
]
def _accessible_courses_summary_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
def course_filter(course_summary):
"""
Filter out unusable and inaccessible courses
"""
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
if course_summary.location.course == 'templates':
return False
return has_studio_read_access(request.user, course_summary.id)
courses_summary = filter(course_filter, modulestore().get_course_summaries())
in_process_course_actions = get_in_process_course_actions(request)
return courses_summary, in_process_course_actions
def _accessible_courses_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
def course_filter(course):
"""
Filter out unusable and inaccessible courses
"""
if isinstance(course, ErrorDescriptor):
return False
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
if course.location.course == 'templates':
return False
return has_studio_read_access(request.user, course.id)
courses = filter(course_filter, modulestore().get_courses())
in_process_course_actions = get_in_process_course_actions(request)
return courses, in_process_course_actions
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
courses_list = {}
in_process_course_actions = []
instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
all_courses = instructor_courses | staff_courses
for course_access in all_courses:
course_key = course_access.course_id
if course_key is None:
# If the course_access does not have a course_id, it's an org-based role, so we fall back
raise AccessListFallback
if course_key not in courses_list:
# check for any course action state for this course
in_process_course_actions.extend(
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED},
should_display=True,
course_key=course_key,
)
)
# check for the course itself
try:
course = modulestore().get_course(course_key)
except ItemNotFoundError:
# If a user has access to a course that doesn't exist, don't do anything with that course
course = None
if course is not None and not isinstance(course, ErrorDescriptor):
# ignore deleted or errored courses
courses_list[course_key] = course
return courses_list.values(), in_process_course_actions
def _accessible_libraries_list(user):
"""
List all libraries available to the logged in user by iterating through all libraries
"""
# No need to worry about ErrorDescriptors - split's get_libraries() never returns them.
return [lib for lib in modulestore().get_libraries() if has_studio_read_access(user, lib.location.library_key)]
@login_required
@ensure_csrf_cookie
def course_listing(request):
"""
List all courses available to the logged in user
"""
courses, in_process_course_actions = get_courses_accessible_to_user(request)
libraries = _accessible_libraries_list(request.user) if LIBRARIES_ENABLED else []
programs_config = ProgramsApiConfig.current()
raw_programs = get_programs(request.user) if programs_config.is_studio_tab_enabled else []
# Sort programs alphabetically by name.
# TODO: Support ordering in the Programs API itself.
programs = sorted(raw_programs, key=lambda p: p['name'].lower())
def format_in_process_course_view(uca):
"""
Return a dict of the data which the view requires for each course action that has not yet succeeded
"""
return {
'display_name': uca.display_name,
'course_key': unicode(uca.course_key),
'org': uca.course_key.org,
'number': uca.course_key.course,
'run': uca.course_key.run,
'is_failed': True if uca.state == CourseRerunUIStateManager.State.FAILED else False,
'is_in_progress': True if uca.state == CourseRerunUIStateManager.State.IN_PROGRESS else False,
'dismiss_link': reverse_course_url(
'course_notifications_handler',
uca.course_key,
kwargs={
'action_state_id': uca.id,
},
) if uca.state == CourseRerunUIStateManager.State.FAILED else ''
}
def format_library_for_view(library):
"""
Return a dict of the data which the view requires for each library
"""
return {
'display_name': library.display_name,
'library_key': unicode(library.location.library_key),
'url': reverse_library_url('library_handler', unicode(library.location.library_key)),
'org': library.display_org_with_default,
'number': library.display_number_with_default,
'can_edit': has_studio_write_access(request.user, library.location.library_key),
}
courses = _remove_in_process_courses(courses, in_process_course_actions)
in_process_course_actions = [format_in_process_course_view(uca) for uca in in_process_course_actions]
return render_to_response('index.html', {
'courses': courses,
'in_process_course_actions': in_process_course_actions,
'libraries_enabled': LIBRARIES_ENABLED,
'libraries': [format_library_for_view(lib) for lib in libraries],
'show_new_library_button': LIBRARIES_ENABLED and request.user.is_active,
'user': request.user,
'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
'rerun_creator_status': GlobalStaff().has_user(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
'allow_course_reruns': settings.FEATURES.get('ALLOW_COURSE_RERUNS', True),
'is_programs_enabled': programs_config.is_studio_tab_enabled and request.user.is_staff,
'programs': programs,
'program_authoring_url': reverse('programs'),
})
def _get_rerun_link_for_item(course_key):
""" Returns the rerun link for the given course key. """
return reverse_course_url('course_rerun_handler', course_key)
def _deprecated_blocks_info(course_module, deprecated_block_types):
"""
Returns deprecation information about `deprecated_block_types`
Arguments:
course_module (CourseDescriptor): course object
deprecated_block_types (list): list of deprecated blocks types
Returns:
Dict with following keys:
block_types (list): list containing types of all deprecated blocks
block_types_enabled (bool): True if any of the `deprecated_block_types` are present in the Advanced Module List, else False
blocks (list): List of `deprecated_block_types` component names and their parent's url
advance_settings_url (str): URL to the advanced settings page
"""
data = {
'block_types': deprecated_block_types,
'block_types_enabled': any(
block_type in course_module.advanced_modules for block_type in deprecated_block_types
),
'blocks': [],
'advance_settings_url': reverse_course_url('advanced_settings_handler', course_module.id)
}
try:
structure_data = api.course_structure(course_module.id, block_types=deprecated_block_types)
except errors.CourseStructureNotAvailableError:
return data
blocks = []
for block in structure_data['blocks'].values():
blocks.append([reverse_usage_url('container_handler', block['parent']), block['display_name']])
data['blocks'].extend(blocks)
return data
@login_required
@ensure_csrf_cookie
def course_index(request, course_key):
"""
Display an editable course overview.
org, course, name: Attributes of the Location for the item to edit
"""
# A depth of None implies the whole course. The course outline needs this in order to compute has_changes.
# A unit may not have a draft version, but one of its components could, and hence the unit itself has changes.
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
if not course_module:
raise Http404
lms_link = get_lms_link_for_item(course_module.location)
reindex_link = None
if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
reindex_link = "/course/{course_id}/search_reindex".format(course_id=unicode(course_key))
sections = course_module.get_children()
course_structure = _course_outline_json(request, course_module)
locator_to_show = request.GET.get('show', None)
course_release_date = get_default_time_display(course_module.start) if course_module.start != DEFAULT_START_DATE else _("Unscheduled")
settings_url = reverse_course_url('settings_handler', course_key)
try:
current_action = CourseRerunState.objects.find_first(course_key=course_key, should_display=True)
except (ItemNotFoundError, CourseActionStateItemNotFoundError):
current_action = None
deprecated_blocks_info = _deprecated_blocks_info(course_module, settings.DEPRECATED_BLOCK_TYPES)
return render_to_response('course_outline.html', {
'context_course': course_module,
'lms_link': lms_link,
'sections': sections,
'course_structure': course_structure,
'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None,
'rerun_notification_id': current_action.id if current_action else None,
'course_release_date': course_release_date,
'settings_url': settings_url,
'reindex_link': reindex_link,
'deprecated_blocks_info': deprecated_blocks_info,
'notification_dismiss_url': reverse_course_url(
'course_notifications_handler',
current_action.course_key,
kwargs={
'action_state_id': current_action.id,
},
) if current_action else None,
})
def get_courses_accessible_to_user(request):
"""
Try to get all courses by first reversing django groups, falling back to the old method if that fails
Note: overhead of pymongo reads will increase if getting courses from django groups fails
"""
if GlobalStaff().has_user(request.user):
# user has global access so no need to get courses from django groups
courses, in_process_course_actions = _accessible_courses_summary_list(request)
else:
try:
courses, in_process_course_actions = _accessible_courses_list_from_groups(request)
except AccessListFallback:
# user has some old groups or there was some error getting courses from django groups
# so fallback to iterating through all courses
courses, in_process_course_actions = _accessible_courses_summary_list(request)
return courses, in_process_course_actions
def _remove_in_process_courses(courses, in_process_course_actions):
"""
Removes any in-process courses from the courses list. In-process refers to courses
that are in the process of being generated for a re-run.
"""
def format_course_for_view(course):
"""
Return a dict of the data which the view requires for each course
"""
return {
'display_name': course.display_name,
'course_key': unicode(course.location.course_key),
'url': reverse_course_url('course_handler', course.id),
'lms_link': get_lms_link_for_item(course.location),
'rerun_link': _get_rerun_link_for_item(course.id),
'org': course.display_org_with_default,
'number': course.display_number_with_default,
'run': course.location.run
}
in_process_action_course_keys = [uca.course_key for uca in in_process_course_actions]
courses = [
format_course_for_view(course)
for course in courses
if not isinstance(course, ErrorDescriptor) and (course.id not in in_process_action_course_keys)
]
return courses
def course_outline_initial_state(locator_to_show, course_structure):
"""
Returns the desired initial state for the course outline view. If the 'show' request parameter
was provided, then the view's initial state will be to have the desired item fully expanded
and to scroll to see the new item.
"""
def find_xblock_info(xblock_info, locator):
"""
Finds the xblock info for the specified locator.
"""
if xblock_info['id'] == locator:
return xblock_info
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
result = find_xblock_info(child_xblock_info, locator)
if result:
return result
return None
def collect_all_locators(locators, xblock_info):
"""
Collect all the locators for an xblock and its children.
"""
locators.append(xblock_info['id'])
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
collect_all_locators(locators, child_xblock_info)
selected_xblock_info = find_xblock_info(course_structure, locator_to_show)
if not selected_xblock_info:
return None
expanded_locators = []
collect_all_locators(expanded_locators, selected_xblock_info)
return {
'locator_to_show': locator_to_show,
'expanded_locators': expanded_locators
}
@expect_json
def _create_or_rerun_course(request):
"""
To be called by requests that create a new destination course (i.e., create_new_course and rerun_course)
Returns the destination course_key and overriding fields for the new course.
Raises DuplicateCourseError and InvalidKeyError
"""
if not auth.user_has_role(request.user, CourseCreatorRole()):
raise PermissionDenied()
try:
org = request.json.get('org')
course = request.json.get('number', request.json.get('course'))
display_name = request.json.get('display_name')
# force the start date for reruns and allow us to override start via the client
start = request.json.get('start', CourseFields.start.default)
run = request.json.get('run')
# allow/disable unicode characters in course_id according to settings
if not settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID'):
if _has_non_ascii_characters(org) or _has_non_ascii_characters(course) or _has_non_ascii_characters(run):
return JsonResponse(
{'error': _('Special characters not allowed in organization, course number, and course run.')},
status=400
)
fields = {'start': start}
if display_name is not None:
fields['display_name'] = display_name
# Set a unique wiki_slug for newly created courses. To maintain active wiki_slugs for
# existing xml courses this cannot be changed in CourseDescriptor.
# TODO get rid of defining wiki slug in this org/course/run specific way and reconcile
# w/ xmodule.course_module.CourseDescriptor.__init__
wiki_slug = u"{0}.{1}.{2}".format(org, course, run)
definition_data = {'wiki_slug': wiki_slug}
fields.update(definition_data)
if 'source_course_key' in request.json:
return _rerun_course(request, org, course, run, fields)
else:
return _create_new_course(request, org, course, run, fields)
except DuplicateCourseError:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization and course number. Please '
'change either organization or course number to be unique.'
),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
})
except InvalidKeyError as error:
return JsonResponse({
"ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(name=display_name, err=error.message)}
)
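# Illustrative sketch, not part of the original module: the JSON body consumed by
# _create_or_rerun_course is expected to look roughly like
#   {"org": "edX", "number": "DemoX", "run": "2016_T1",
#    "display_name": "Demonstration Course", "start": "2016-01-01T00:00:00Z",
#    "source_course_key": "course-v1:edX+DemoX+2015"}
# where "number" may also be supplied as "course", "start" and "display_name"
# are optional, and "source_course_key" is only present for reruns; all of the
# concrete values shown here are hypothetical.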
def _create_new_course(request, org, number, run, fields):
"""
Create a new course.
Returns the URL for the course overview page.
Raises DuplicateCourseError if the course already exists
"""
org_data = get_organization_by_short_name(org)
if not org_data and organizations_enabled():
return JsonResponse(
{'error': _('You must link this course to an organization in order to continue. '
'Organization you selected does not exist in the system, '
'you will need to add it to the system')},
status=400
)
store_for_new_course = modulestore().default_modulestore.get_modulestore_type()
new_course = create_new_course_in_store(store_for_new_course, request.user, org, number, run, fields)
add_organization_course(org_data, new_course.id)
return JsonResponse({
'url': reverse_course_url('course_handler', new_course.id),
'course_key': unicode(new_course.id),
})
def create_new_course_in_store(store, user, org, number, run, fields):
"""
Create course in store w/ handling instructor enrollment, permissions, and defaulting the wiki slug.
Separated out b/c command line course creation uses this as well as the web interface.
"""
# Set default language from settings and enable web certs
fields.update({
'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
'cert_html_view_enabled': True,
})
with modulestore().default_store(store):
# Creating the course raises DuplicateCourseError if an existing course with this org/name is found
new_course = modulestore().create_course(
org,
number,
run,
user.id,
fields=fields,
)
# Make sure user has instructor and staff access to the new course
add_instructor(new_course.id, user, user)
# Initialize permissions for user in the new course
initialize_permissions(new_course.id, user)
return new_course
def _rerun_course(request, org, number, run, fields):
"""
Reruns an existing course.
Returns the URL for the course listing page.
"""
source_course_key = CourseKey.from_string(request.json.get('source_course_key'))
# verify user has access to the original course
if not has_studio_write_access(request.user, source_course_key):
raise PermissionDenied()
# create destination course key
store = modulestore()
with store.default_store('split'):
destination_course_key = store.make_course_key(org, number, run)
# verify org course and run don't already exist
if store.has_course(destination_course_key, ignore_case=True):
raise DuplicateCourseError(source_course_key, destination_course_key)
# Make sure user has instructor and staff access to the destination course
# so the user can see the updated status for that course
add_instructor(destination_course_key, request.user, request.user)
# Mark the action as initiated
CourseRerunState.objects.initiated(source_course_key, destination_course_key, request.user, fields['display_name'])
# Clear the fields that must be reset for the rerun
fields['advertised_start'] = None
# Rerun the course as a new celery task
json_fields = json.dumps(fields, cls=EdxJSONEncoder)
rerun_course.delay(unicode(source_course_key), unicode(destination_course_key), request.user.id, json_fields)
# Return course listing page
return JsonResponse({
'url': reverse_url('course_handler'),
'destination_course_key': unicode(destination_course_key)
})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, course_key_string):
"""
GET
html: return html for editing the course info handouts and updates.
"""
course_key = from_string_or_404(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if not course_module:
raise Http404
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
return render_to_response(
'course_info.html',
{
'context_course': course_module,
'updates_url': reverse_course_url('course_info_update_handler', course_key),
'handouts_locator': course_key.make_usage_key('course_info', 'handouts'),
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.id),
'push_notification_enabled': push_notification_enabled()
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, course_key_string, provided_id=None):
"""
restful CRUD operations on course_info updates.
provided_id should be None if it's new (create) and the index otherwise.
GET
json: return the course info update models
POST
json: create an update
PUT or DELETE
json: change an existing update
"""
if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
return HttpResponseBadRequest("Only supports json requests")
course_key = CourseKey.from_string(course_key_string)
usage_key = course_key.make_usage_key('course_info', 'updates')
if provided_id == '':
provided_id = None
# check that logged in user has permissions to this item (GET shouldn't require this level?)
if not has_studio_write_access(request.user, usage_key.course_key):
raise PermissionDenied()
if request.method == 'GET':
course_updates = get_course_updates(usage_key, provided_id, request.user.id)
if isinstance(course_updates, dict) and course_updates.get('error'):
return JsonResponse(course_updates, course_updates.get('status', 400))
else:
return JsonResponse(course_updates)
elif request.method == 'DELETE':
try:
return JsonResponse(delete_course_update(usage_key, request.json, provided_id, request.user))
except Exception:
return HttpResponseBadRequest(
"Failed to delete",
content_type="text/plain"
)
# can be either and sometimes django is rewriting one to the other:
elif request.method in ('POST', 'PUT'):
try:
return JsonResponse(update_course_updates(usage_key, request.json, provided_id, request.user))
except Exception:
return HttpResponseBadRequest(
"Failed to save",
content_type="text/plain"
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, course_key_string):
"""
Course settings for dates and about pages
GET
html: get the page
json: get the CourseDetails model
PUT
json: update the Course and About xblocks through the CourseDetails model
"""
course_key = CourseKey.from_string(course_key_string)
credit_eligibility_enabled = settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
upload_asset_url = reverse_course_url('assets_handler', course_key)
# see if the ORG of this course can be attributed to a 'Microsite'. In that case, the
# course about page should be editable in Studio
marketing_site_enabled = microsite.get_value_for_org(
course_module.location.org,
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
about_page_editable = not marketing_site_enabled
enrollment_end_editable = GlobalStaff().has_user(request.user) or not marketing_site_enabled
short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
self_paced_enabled = SelfPacedConfiguration.current().enabled
settings_context = {
'context_course': course_module,
'course_locator': course_key,
'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_key),
'course_image_url': course_image_url(course_module),
'details_url': reverse_course_url('settings_handler', course_key),
'about_page_editable': about_page_editable,
'short_description_editable': short_description_editable,
'upload_asset_url': upload_asset_url,
'course_handler_url': reverse_course_url('course_handler', course_key),
'language_options': settings.ALL_LANGUAGES,
'credit_eligibility_enabled': credit_eligibility_enabled,
'is_credit_course': False,
'show_min_grade_warning': False,
'enrollment_end_editable': enrollment_end_editable,
'is_prerequisite_courses_enabled': is_prerequisite_courses_enabled(),
'is_entrance_exams_enabled': is_entrance_exams_enabled(),
'self_paced_enabled': self_paced_enabled,
}
if is_prerequisite_courses_enabled():
courses, in_process_course_actions = get_courses_accessible_to_user(request)
# exclude current course from the list of available courses
courses = [course for course in courses if course.id != course_key]
if courses:
courses = _remove_in_process_courses(courses, in_process_course_actions)
settings_context.update({'possible_pre_requisite_courses': courses})
if credit_eligibility_enabled:
if is_credit_course(course_key):
# get all credit eligibility requirements
credit_requirements = get_credit_requirements(course_key)
# pair together requirements with same 'namespace' values
paired_requirements = {}
for requirement in credit_requirements:
namespace = requirement.pop("namespace")
paired_requirements.setdefault(namespace, []).append(requirement)
# if 'minimum_grade_credit' of a course is not set or 0 then
# show warning message to course author.
show_min_grade_warning = False if course_module.minimum_grade_credit > 0 else True
settings_context.update(
{
'is_credit_course': True,
'credit_requirements': paired_requirements,
'show_min_grade_warning': show_min_grade_warning,
}
)
return render_to_response('settings.html', settings_context)
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
course_details = CourseDetails.fetch(course_key)
return JsonResponse(
course_details,
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
# For every other possible method type submitted by the caller...
else:
# if pre-requisite course feature is enabled set pre-requisite course
if is_prerequisite_courses_enabled():
prerequisite_course_keys = request.json.get('pre_requisite_courses', [])
if prerequisite_course_keys:
if not all(is_valid_course_key(course_key) for course_key in prerequisite_course_keys):
return JsonResponseBadRequest({"error": _("Invalid prerequisite course key")})
set_prerequisite_courses(course_key, prerequisite_course_keys)
# If the entrance exams feature has been enabled, we'll need to check for some
# feature-specific settings and handle them accordingly
# We have to be careful that we're only executing the following logic if we actually
# need to create or delete an entrance exam from the specified course
if is_entrance_exams_enabled():
course_entrance_exam_present = course_module.entrance_exam_enabled
entrance_exam_enabled = request.json.get('entrance_exam_enabled', '') == 'true'
ee_min_score_pct = request.json.get('entrance_exam_minimum_score_pct', None)
# If the entrance exam box on the settings screen has been checked...
if entrance_exam_enabled:
# Load the default minimum score threshold from settings, then try to override it
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
if ee_min_score_pct:
entrance_exam_minimum_score_pct = float(ee_min_score_pct)
if entrance_exam_minimum_score_pct.is_integer():
entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
entrance_exam_minimum_score_pct = unicode(entrance_exam_minimum_score_pct)
# If there's already an entrance exam defined, we'll update the existing one
if course_entrance_exam_present:
exam_data = {
'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct
}
update_entrance_exam(request, course_key, exam_data)
# If there's no entrance exam defined, we'll create a new one
else:
create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
# If the entrance exam box on the settings screen has been unchecked,
# and the course has an entrance exam attached...
elif not entrance_exam_enabled and course_entrance_exam_present:
delete_entrance_exam(request, course_key)
# Perform the normal update workflow for the CourseDetails model
return JsonResponse(
CourseDetails.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, course_key_string, grader_index=None):
"""
Course Grading policy configuration
GET
html: get the page
json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
json w/ grader_index: get the specific grader
PUT
json no grader_index: update the Course through the CourseGrading model
json w/ grader_index: create or update the specific grader (create if index out of range)
"""
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
course_details = CourseGradingModel.fetch(course_key)
return render_to_response('settings_graders.html', {
'context_course': course_module,
'course_locator': course_key,
'course_details': course_details,
'grading_url': reverse_course_url('grading_handler', course_key),
'is_credit_course': is_credit_course(course_key),
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
if grader_index is None:
return JsonResponse(
CourseGradingModel.fetch(course_key),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(CourseGradingModel.fetch_grader(course_key, grader_index))
elif request.method in ('POST', 'PUT'): # post or put, doesn't matter.
# update credit course requirements if 'minimum_grade_credit'
# field value is changed
if 'minimum_grade_credit' in request.json:
update_credit_course_requirements.delay(unicode(course_key))
# None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
if grader_index is None:
return JsonResponse(
CourseGradingModel.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(
CourseGradingModel.update_grader_from_json(course_key, request.json, request.user)
)
elif request.method == "DELETE" and grader_index is not None:
CourseGradingModel.delete_grader(course_key, grader_index, request.user)
return JsonResponse()
def _refresh_course_tabs(request, course_module):
"""
Automatically adds/removes tabs if changes to the course require them.
Raises:
InvalidTabsException: raised if there's a problem with the new version of the tabs.
"""
def update_tab(tabs, tab_type, tab_enabled):
"""
Adds or removes a course tab based upon whether it is enabled.
"""
tab_panel = {
"type": tab_type.type,
}
has_tab = tab_panel in tabs
if tab_enabled and not has_tab:
tabs.append(CourseTab.from_json(tab_panel))
elif not tab_enabled and has_tab:
tabs.remove(tab_panel)
course_tabs = copy.copy(course_module.tabs)
# Additionally update any tabs that are provided by non-dynamic course views
for tab_type in CourseTabPluginManager.get_tab_types():
if not tab_type.is_dynamic and tab_type.is_default:
tab_enabled = tab_type.is_enabled(course_module, user=request.user)
update_tab(course_tabs, tab_type, tab_enabled)
CourseTabList.validate_tabs(course_tabs)
# Save the tabs into the course if they have been changed
if course_tabs != course_module.tabs:
course_module.tabs = course_tabs
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, course_key_string):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts.
"""
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_advanced.html', {
'context_course': course_module,
'advanced_dict': CourseMetadata.fetch(course_module),
'advanced_settings_url': reverse_course_url('advanced_settings_handler', course_key)
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
try:
# validate data formats and update the course module.
# Note: don't update mongo yet, but wait until after any tabs are changed
is_valid, errors, updated_data = CourseMetadata.validate_and_update_from_json(
course_module,
request.json,
user=request.user,
)
if is_valid:
try:
# update the course tabs if required by any setting changes
_refresh_course_tabs(request, course_module)
except InvalidTabsException as err:
log.exception(err.message)
response_message = [
{
'message': _('An error occurred while trying to save your tabs'),
'model': {'display_name': _('Tabs Exception')}
}
]
return JsonResponseBadRequest(response_message)
# now update mongo
modulestore().update_item(course_module, request.user.id)
return JsonResponse(updated_data)
else:
return JsonResponseBadRequest(errors)
# Handle all errors that validation doesn't catch
except (TypeError, ValueError, InvalidTabsException) as err:
return HttpResponseBadRequest(
django.utils.html.escape(err.message),
content_type="text/plain"
)
class TextbookValidationError(Exception):
"An error thrown when a textbook input is invalid"
pass
def validate_textbooks_json(text):
"""
Validate the given text as representing a list of PDF textbooks
"""
try:
textbooks = json.loads(text)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbooks, (list, tuple)):
raise TextbookValidationError("must be JSON list")
for textbook in textbooks:
validate_textbook_json(textbook)
# check specified IDs for uniqueness
all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
unique_ids = set(all_ids)
if len(all_ids) > len(unique_ids):
raise TextbookValidationError("IDs must be unique")
return textbooks
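# Illustrative sketch, not part of the original module: a payload accepted by
# validate_textbooks_json would look roughly like
#   '[{"tab_title": "Course Notes", "id": "1notes"},
#     {"tab_title": "Lab Manual", "id": "2labs"}]'
# i.e. a JSON list of objects, each with a non-empty "tab_title" and, when an
# "id" is given, one that starts with a digit and is unique across the list;
# the titles and ids shown here are hypothetical.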
def validate_textbook_json(textbook):
"""
Validate the given text as representing a single PDF textbook
"""
if isinstance(textbook, basestring):
try:
textbook = json.loads(textbook)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbook, dict):
raise TextbookValidationError("must be JSON object")
if not textbook.get("tab_title"):
raise TextbookValidationError("must have tab_title")
tid = unicode(textbook.get("id", ""))
if tid and not tid[0].isdigit():
raise TextbookValidationError("textbook ID must start with a digit")
return textbook
def assign_textbook_id(textbook, used_ids=()):
"""
Return an ID that can be assigned to a textbook
and doesn't match the used_ids
"""
tid = Location.clean(textbook["tab_title"])
if not tid[0].isdigit():
# stick a random digit in front
tid = random.choice(string.digits) + tid
while tid in used_ids:
# add a random ASCII character to the end
tid = tid + random.choice(string.ascii_lowercase)
return tid
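# Illustrative sketch, not part of the original module, assuming Location.clean
# only normalizes the characters of the title: a textbook with tab_title
# "Lab Manual" would be cleaned to something like "Lab_Manual", prefixed with a
# random digit because it does not start with one (e.g. "7Lab_Manual"), and,
# if that id were already in used_ids, extended with random lowercase letters
# until it is unique.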
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, course_key_string):
"""
A RESTful handler for textbook collections.
GET
html: return textbook list page (Backbone application)
json: return JSON representation of all textbooks in this course
POST
json: create a new textbook for this course
PUT
json: overwrite all textbooks in the course with the given list
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if "application/json" not in request.META.get('HTTP_ACCEPT', 'text/html'):
# return HTML page
upload_asset_url = reverse_course_url('assets_handler', course_key)
textbook_url = reverse_course_url('textbooks_list_handler', course_key)
return render_to_response('textbooks.html', {
'context_course': course,
'textbooks': course.pdf_textbooks,
'upload_asset_url': upload_asset_url,
'textbook_url': textbook_url,
})
# from here on down, we know the client has requested JSON
if request.method == 'GET':
return JsonResponse(course.pdf_textbooks)
elif request.method == 'PUT':
try:
textbooks = validate_textbooks_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
tids = set(t["id"] for t in textbooks if "id" in t)
for textbook in textbooks:
if "id" not in textbook:
tid = assign_textbook_id(textbook, tids)
textbook["id"] = tid
tids.add(tid)
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
course.pdf_textbooks = textbooks
store.update_item(course, request.user.id)
return JsonResponse(course.pdf_textbooks)
elif request.method == 'POST':
# create a new textbook for the course
try:
textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if not textbook.get("id"):
tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
textbook["id"] = assign_textbook_id(textbook, tids)
existing = course.pdf_textbooks
existing.append(textbook)
course.pdf_textbooks = existing
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
store.update_item(course, request.user.id)
resp = JsonResponse(textbook, status=201)
resp["Location"] = reverse_course_url(
'textbooks_detail_handler',
course.id,
kwargs={'textbook_id': textbook["id"]}
)
return resp
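# Request sketch (added comment; the URL path shown is hypothetical -- the real
# route comes from reverse_course_url('textbooks_list_handler', course_key)):
#
#   PUT /textbooks/<course_key>      Accept: application/json
#   [{"tab_title": "Calculus", "id": "1-calc"},
#    {"tab_title": "Linear Algebra"}]       # id is auto-assigned when missing
#
# The whole pdf_textbooks collection is replaced and echoed back as JSON; a
# POST instead appends a single textbook and returns 201 with a Location header.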
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, course_key_string, textbook_id):
"""
JSON API endpoint for manipulating a textbook via its internal ID.
Used by the Backbone application.
GET
json: return JSON representation of textbook
POST or PUT
json: update textbook based on provided information
DELETE
json: remove textbook
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
matching_id = [tb for tb in course_module.pdf_textbooks
if unicode(tb.get("id")) == unicode(textbook_id)]
if matching_id:
textbook = matching_id[0]
else:
textbook = None
if request.method == 'GET':
if not textbook:
return JsonResponse(status=404)
return JsonResponse(textbook)
elif request.method in ('POST', 'PUT'): # can be either and sometimes
# django is rewriting one to the other
try:
new_textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
new_textbook["id"] = textbook_id
if textbook:
i = course_module.pdf_textbooks.index(textbook)
new_textbooks = course_module.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = new_textbooks
else:
course_module.pdf_textbooks.append(new_textbook)
store.update_item(course_module, request.user.id)
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
if not textbook:
return JsonResponse(status=404)
i = course_module.pdf_textbooks.index(textbook)
remaining_textbooks = course_module.pdf_textbooks[0:i]
remaining_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = remaining_textbooks
store.update_item(course_module, request.user.id)
return JsonResponse()
def remove_content_or_experiment_group(request, store, course, configuration, group_configuration_id, group_id=None):
"""
Remove content group or experiment group configuration only if it's not in use.
"""
configuration_index = course.user_partitions.index(configuration)
if configuration.scheme.name == RANDOM_SCHEME:
usages = GroupConfiguration.get_content_experiment_usage_info(store, course)
used = int(group_configuration_id) in usages
if used:
return JsonResponse(
{"error": _("This group configuration is in use and cannot be deleted.")},
status=400
)
course.user_partitions.pop(configuration_index)
elif configuration.scheme.name == COHORT_SCHEME:
if not group_id:
return JsonResponse(status=404)
group_id = int(group_id)
usages = GroupConfiguration.get_content_groups_usage_info(store, course)
used = group_id in usages
if used:
return JsonResponse(
{"error": _("This content group is in use and cannot be deleted.")},
status=400
)
matching_groups = [group for group in configuration.groups if group.id == group_id]
if matching_groups:
group_index = configuration.groups.index(matching_groups[0])
configuration.groups.pop(group_index)
else:
return JsonResponse(status=404)
course.user_partitions[configuration_index] = configuration
store.update_item(course, request.user.id)
return JsonResponse(status=204)
@require_http_methods(("GET", "POST"))
@login_required
@ensure_csrf_cookie
def group_configurations_list_handler(request, course_key_string):
"""
A RESTful handler for Group Configurations
GET
html: return Group Configurations list page (Backbone application)
POST
json: create new group configuration
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
group_configuration_url = reverse_course_url('group_configurations_list_handler', course_key)
course_outline_url = reverse_course_url('course_handler', course_key)
should_show_experiment_groups = are_content_experiments_enabled(course)
if should_show_experiment_groups:
experiment_group_configurations = GroupConfiguration.get_split_test_partitions_with_usage(store, course)
else:
experiment_group_configurations = None
content_group_configuration = GroupConfiguration.get_or_create_content_group(store, course)
return render_to_response('group_configurations.html', {
'context_course': course,
'group_configuration_url': group_configuration_url,
'course_outline_url': course_outline_url,
'experiment_group_configurations': experiment_group_configurations,
'should_show_experiment_groups': should_show_experiment_groups,
'content_group_configuration': content_group_configuration
})
elif "application/json" in request.META.get('HTTP_ACCEPT'):
if request.method == 'POST':
# create a new group configuration for the course
try:
new_configuration = GroupConfiguration(request.body, course).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
course.user_partitions.append(new_configuration)
response = JsonResponse(new_configuration.to_json(), status=201)
response["Location"] = reverse_course_url(
'group_configurations_detail_handler',
course.id,
kwargs={'group_configuration_id': new_configuration.id}
)
store.update_item(course, request.user.id)
return response
else:
return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def group_configurations_detail_handler(request, course_key_string, group_configuration_id, group_id=None):
"""
JSON API endpoint for manipulating a group configuration via its internal ID.
Used by the Backbone application.
POST or PUT
json: update group configuration based on provided information
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
matching_id = [p for p in course.user_partitions
if unicode(p.id) == unicode(group_configuration_id)]
if matching_id:
configuration = matching_id[0]
else:
configuration = None
if request.method in ('POST', 'PUT'): # can be either and sometimes
# django is rewriting one to the other
try:
new_configuration = GroupConfiguration(request.body, course, group_configuration_id).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if configuration:
index = course.user_partitions.index(configuration)
course.user_partitions[index] = new_configuration
else:
course.user_partitions.append(new_configuration)
store.update_item(course, request.user.id)
configuration = GroupConfiguration.update_usage_info(store, course, new_configuration)
return JsonResponse(configuration, status=201)
elif request.method == "DELETE":
if not configuration:
return JsonResponse(status=404)
return remove_content_or_experiment_group(
request=request,
store=store,
course=course,
configuration=configuration,
group_configuration_id=group_configuration_id,
group_id=group_id
)
def are_content_experiments_enabled(course):
"""
Returns True if content experiments have been enabled for the course.
"""
return (
'split_test' in ADVANCED_COMPONENT_TYPES and
'split_test' in course.advanced_modules
)
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
if user.is_staff:
course_creator_status = 'granted'
elif settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
| agpl-3.0 | 1,798,611,080,060,205,000 | 42.133538 | 142 | 0.638718 | false |
freshs/freshs | client/modules/ffs/client_ffs.py | 1 | 13183 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Kai Kratzer, Universität Stuttgart, ICP,
# Allmandring 3, 70569 Stuttgart, Germany; all rights
# reserved unless otherwise stated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import time
##import wrappers for simulation programs
from harness import harness
class client_ffs:
def __init__(self, client):
self.cli = client
# build flexible option string
def build_options(self,paramdict):
optionstring = ''
for el in paramdict:
if el != 'random_points':
optionstring += ' -' + str(el) + ' ' + str(paramdict[el])
return optionstring
def build_custominfo(self, basis, paramdict):
for el in paramdict:
if el not in basis:
basis += ", \"" + el + "\": " + str(paramdict[el])
return basis
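    # Added note: build_options turns every entry of paramdict except
    # 'random_points' into a command-line flag, so a hypothetical
    # {'A': 0.2, 'B': 0.9} yields " -A 0.2 -B 0.9"; build_custominfo appends
    # the same keys as '"key": value' pairs to a partial JSON result string.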
#### JOB 1 ####
def job1_escape_flux(self, parameterset):
A = parameterset['A']
B = parameterset['B']
#next_interface = parameterset['next_interface']
act_lambda = parameterset['act_lambda']
seed = parameterset['seed']
try:
parent_id = parameterset['rp_id']
        except KeyError:
parent_id = 'escape'
if 'uuid' in parameterset:
uuid = parameterset['uuid']
else:
uuid = ''
print('Calculating escape flux: ' + str(A) + ', ' + str(B))
all_meta = {}
success = False
rcvals=[]
points = []
q = 0
h = harness(self.cli.exec_name, self.cli.harness_path + "/job_script", self)
# Wrap the code that uses threading/subprocesses
# in a try-catch to clean up on interrupts, ctrl-C etc.
try:
# start loading the input pipes for the MD process
use_existing_point = False
# Checking for previous points
if 'random_points' in parameterset:
print("Random points key in paramset")
if not 'None' in str(parameterset['random_points']):
# Use previous point
use_existing_point = True
if use_existing_point:
# we are at least on A...
if 'equilibrate_point' in parameterset:
comefromok = True
else:
comefromok = False
h.send( True, True, True, parameterset['random_points'] )
optionlist = "-tmpdir " + h.tmpdir + \
" -initial_config None" + \
" -in_fifoname " + h.crds_in_fifoname + \
" -back_fifoname " + h.crds_back_fifoname + \
" -metadata_fifoname " + h.metadata_fifoname + \
" -halt_steps 0 " + \
" -check_rc_every 1" + \
self.build_options(parameterset)
h.subthread_run_script(optionlist)
else:
# we assume that the simulation is set up in A if no
# last successful point is received
comefromok = True
h.send( False, True, True, ['None'] )
optionlist = "-tmpdir " + h.tmpdir + \
" -initial_config " + self.cli.harness_path + "/initial_config.dat" + \
" -back_fifoname " + h.crds_back_fifoname + \
" -metadata_fifoname " + h.metadata_fifoname + \
" -halt_steps 0" + \
" -check_rc_every 1" + \
self.build_options(parameterset)
h.subthread_run_script(optionlist)
calcsteps = 0
ctime = 0
while True:
# read output from the MD subthread
steps, time, rc, all_meta = h.collect( points, rcvals )
if 'quit' in all_meta:
raise SystemExit(all_meta['quit'])
calcsteps += steps
ctime += time
print("Client: collected MD output" + str((steps, time, rc)))
flRc = float(rc)
if 'step_abort' in all_meta:
if all_meta['step_abort']:
print("Client: job was aborted because of maximum steps.")
success = False
elif 'reached_B_escape' in all_meta:
if all_meta['reached_B_escape']:
print("Escape job reached B, asking server for new config")
success = False
else:
success = True
else:
success = True
if self.cli.checking_script > 0:
break
else:
# Verify that escape conditions have been met.
# This is necessary for simulation tools
# which do not do this logic themselves
# ...but can cause problems if they *do*
# perform the logic themselves
if flRc >= float(B):
print("Client: reached B, resetting")
success = False
break
elif flRc >= float(A) and comefromok:
print("Client: reached interface coming from A, saving point.")
comefromok = False
success = True
break
elif flRc < float(A) and not comefromok:
print("Client: has fallen back to A")
comefromok = True
if parameterset['max_steps'] > 0:
if calcsteps >= parameterset['max_steps']:
success = False
break
##
print("Client: continuing, with rc: "+str(flRc)+" of "+str(A)+", "+str(B))
# Start a new sender to write out the data that we just recieved.
# Assuming that it is safe to both read and write from points, because all
# simulation programs will complete reading their input
# before they write their output.
h.send( True, True, True, points[-1] )
optionlist = "-tmpdir " + h.tmpdir + \
" -initial_config None" + \
" -in_fifoname " + h.crds_in_fifoname + \
" -back_fifoname " + h.crds_back_fifoname + \
" -metadata_fifoname " + h.metadata_fifoname + \
" -halt_steps 0" + \
" -check_rc_every 1" + \
self.build_options(parameterset)
# fork a subthread to run the MD, starting from the crds_in fifo.
h.subthread_run_script(optionlist)
except Exception as e:
print( "Client: exception while running harness, %s" % e )
h.clean()
raise SystemExit(e)
h.clean()
print("Client: Constructing result string")
if success:
results_base = "\"jobtype\": 1, \"success\": True, \"points\": " + str(points[-1])
else:
results_base = "\"jobtype\": 1, \"success\": False"
results_base += ", \"ctime\": " + str(ctime) + \
", \"seed\": " + str(seed) + \
", \"act_lambda\": " + str(act_lambda) + \
", \"calcsteps\": " + str(calcsteps) + \
", \"origin_points\": \"" + str(parent_id) + "\"" + \
", \"rcval\": " + str(flRc) + \
", \"uuid\": \"" + uuid + "\""
#print("Resultstring before appending:", results_base)
results = self.build_custominfo(results_base, all_meta)
#print("Resultstring after appending:", results)
return "{" + results + "}"
#### JOB 2 ####
def job2_probabilities(self, parameterset):
A = parameterset['A']
next_interface = parameterset['next_interface']
act_lambda = parameterset['act_lambda']
seed = parameterset['seed']
parent_id = parameterset['rp_id']
points = []
rcvals = []
ctime = 0
calcsteps = 0
t = 0.0
i = 0
if 'uuid' in parameterset:
uuid = parameterset['uuid']
else:
uuid = ''
all_meta = {}
h = harness(self.cli.exec_name, self.cli.harness_path+"/job_script", self)
# Wrap the code that uses threading/subprocesses
# in a try-catch to clean up on interrupts, ctrl-C etc.
try:
print("sending: "+str(parameterset['random_points'])[0:64])
# start loading the input pipes for the MD process
h.send( True, True, True, parameterset['random_points'] )
calcsteps = 0
ctime = 0
while True:
optionlist = "-tmpdir " + h.tmpdir + \
" -initial_config None" + \
" -in_fifoname " + h.crds_in_fifoname + \
" -back_fifoname " + h.crds_back_fifoname + \
" -metadata_fifoname " + h.metadata_fifoname + \
" -halt_steps 0" + \
" -check_rc_every 1" + \
self.build_options(parameterset)
# fork a subthread to run the MD
h.subthread_run_script(optionlist)
# read output from the MD subthread
steps, time, rc, all_meta = h.collect( points, rcvals )
calcsteps += steps
ctime += time
flRc = float(rc)
if self.cli.checking_script > 0:
break
else:
# Verify that the conditions have been met.
# This is necessary for simulation tools
# which do not do this logic themselves
if flRc <= A:
break
elif flRc >= next_interface:
break
# Start a new sender to write out the data that we just recieved.
# Assuming that it is safe to both read and write from points, because all
# simulation programs will complete reading their input
# before they write their output.
h.send( True, True, True, points[-1] )
        except Exception as e:
            print( "Client: exception while running harness, %s" % e )
h.clean()
            raise SystemExit(e)
h.clean()
# only build a full results packet if we have a success
if flRc >= next_interface:
results_base = "\"jobtype\": 2, \"success\": True, \"points\": " + str(points[-1])
else:
results_base = "\"jobtype\": 2, \"success\": False"
results_base += ", \"act_lambda\": " + str(act_lambda)+ \
", \"seed\": " + str(seed) + \
", \"origin_points\": \"" + str(parent_id) + "\"" + \
", \"calcsteps\": " + str(calcsteps) + \
", \"ctime\": " + str(ctime) + \
", \"rcval\": " + str(flRc) + \
", \"uuid\": \"" + uuid + "\""
#print("Resultstring before appending:", results_base)
results = self.build_custominfo(results_base, all_meta)
#print("Resultstring after appending:", results)
return "{" + results + "}"
#####################################################
| gpl-3.0 | 5,442,273,400,865,735,000 | 37.319767 | 100 | 0.453345 | false |
quattor/aquilon | tests/broker/test_update_rack.py | 1 | 10779 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2012,2013,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update rack command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateRack(TestBrokerCommand):
def test_010_verifyupdateut9(self):
command = "show rack --rack ut9"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Rack: ut9", command)
self.matchoutput(out, "Fullname: Aperture_name", command)
self.matchoutput(out, "Row: g", command)
self.matchoutput(out, "Column: 3", command)
# Was row zz column 99
def test_015_updatenp997(self):
self.noouttest(["update", "rack", "--rack", "np997", "--row", "xx",
"--column", "77", "--fullname", "My Other Rack",
"--comments", "New other rack comments"])
def test_020_verifyupdatenp997(self):
command = "show rack --rack np997"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Rack: np997", command)
self.matchoutput(out, "Fullname: My Other Rack", command)
self.matchoutput(out, "Row: xx", command)
self.matchoutput(out, "Column: 77", command)
self.matchoutput(out, "Comments: New other rack comments", command)
# Was row yy column 88
def test_025_updatenp998(self):
self.noouttest(["update", "rack", "--rack", "np998", "--row", "vv",
"--column", "66", "--fullname", "My Other Rack"])
def test_030_failrow(self):
command = ["update", "rack", "--rack", "np999", "--row", "a-b"]
err = self.badrequesttest(command)
self.matchoutput(err, "must be alphanumeric", command)
def test_035_alphacolumn(self):
""" we now accept characters for rack columns """
command = ["update", "rack", "--rack", "np999", "--column", "a"]
self.noouttest(command)
def test_100_prepare_CM_setup(self):
command = ["update", "machine", "--machine", "ut9s03p41", "--rack", "ut8"] + self.valid_just_tcm
self.noouttest(command)
command = "search host --machine ut9s03p41"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "aquilon91.aqd-unittest.ms.com", command)
command = "show host --host aquilon91.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, 'Environment: prod',
command)
self.matchoutput(out, 'Build Status: ready',
command)
self.matchoutput(out, 'Rack: ut8',
command)
def test_105_updateroom(self):
command = ['update_rack', '--rack=ut8', '--room=utroom1']
self.justificationmissingtest(command, auth=True, msgcheck=False)
command = ['update_rack', '--rack=ut8', '--room=utroom1'] + self.valid_just_tcm
self.noouttest(command)
def test_110_verifyroom(self):
command = ['show_rack', '--rack=ut8']
out = self.commandtest(command)
self.searchoutput(out,
r'Location Parents: \[.*Building ut, Room utroom1\]',
command)
def test_120_swaproom(self):
command = ['update_rack', '--rack=ut8', '--room=utroom2']
self.justificationmissingtest(command, auth=True, msgcheck=False)
command = ['update_rack', '--rack=ut8', '--room=utroom2'] + self.valid_just_tcm
self.noouttest(command)
def test_130_verifyroom(self):
command = ['show_rack', '--rack=ut8']
out = self.commandtest(command)
self.searchoutput(out,
r'Location Parents: \[.*Building ut, Room utroom2\]',
command)
def test_140_updatebunker(self):
command = ['update_rack', '--rack=ut8', '--bunker=bucket2.ut']
self.justificationmissingtest(command, auth=True, msgcheck=False)
command = ['update_rack', '--rack=ut8', '--bunker=bucket2.ut'] + self.valid_just_tcm
self.noouttest(command)
def test_145_verifybunker(self):
command = ['show_rack', '--rack=ut8']
out = self.commandtest(command)
self.searchoutput(out,
r'Location Parents: \[.*Building ut, '
r'Room utroom2, Bunker bucket2.ut\]',
command)
def test_150_clearroom(self):
command = ['update_rack', '--rack=ut8', '--building', 'ut']
self.justificationmissingtest(command, auth=True, msgcheck=False)
command = ['update_rack', '--rack=ut8', '--building', 'ut'] + self.valid_just_sn
self.noouttest(command)
def test_160_verifyclear(self):
command = ['show_rack', '--rack=ut8']
out = self.commandtest(command)
self.searchclean(out, r'Location Parents: \[.* Room .*\]', command)
self.searchclean(out, r'Location Parents: \[.* Bunker .*\]', command)
def test_170_failchangebuilding(self):
command = ['update_rack', '--rack=ut8', '--room=np-lab1'] + self.valid_just_sn
out = self.badrequesttest(command)
self.matchoutput(out,
"Cannot change buildings. Room np-lab1 is in "
"Building np while Rack ut8 is in Building ut.",
command)
# Was row a column 3
def test_180_updateut3(self):
self.noouttest(["update", "rack", "--rack", "ut3", "--row", "b"])
# Was row g column 2
def test_185_updateut8(self):
self.justificationmissingtest(["update", "rack", "--rack", "ut8", "--column", "8"],
auth=True, msgcheck=False)
# Was row g column 2
def test_186_updateut8(self):
self.emergencynojustification(["update", "rack", "--rack", "ut8", "--column", "8"] + self.emergency_just_with_reason)
# Was row g column 3
def test_190_updateut9(self):
self.noouttest(["update", "rack", "--rack", "ut9", "--row", "h",
"--column", "9", "--fullname", "My Rack",
"--comments", "New rack comments"] + self.valid_just_sn)
def test_195_verifyut9(self):
command = "show rack --rack ut9"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Rack: ut9", command)
self.matchoutput(out, "Fullname: My Rack", command)
self.matchoutput(out, "Row: h", command)
self.matchoutput(out, "Column: 9", command)
self.matchoutput(out, "Comments: New rack comments", command)
def test_200_defaultdns(self):
command = ["update", "rack", "--rack", "ut9",
"--default_dns_domain", "aqd-unittest.ms.com"]
self.noouttest(command)
def test_210_verify_defaultdns(self):
command = ["show", "rack", "--rack", "ut9"]
out = self.commandtest(command)
self.matchoutput(out, "Default DNS Domain: aqd-unittest.ms.com",
command)
def test_220_clear_defaultdns(self):
command = ["update", "rack", "--rack", "ut9",
"--default_dns_domain", ""]
self.noouttest(command)
def test_230_verify_defaultdns_gone(self):
command = ["show", "rack", "--rack", "ut9"]
out = self.commandtest(command)
self.matchclean(out, "Default DNS", command)
def test_235_search_fullname(self):
command = ["search", "rack", "--fullname", "My Other Rack"]
out = self.commandtest(command)
self.matchoutput(out, "np997", command)
self.matchoutput(out, "np998", command)
def test_240_dsdb_fail(self):
command = ["update", "rack", "--rack", "oy604",
"--comments", "Important comments"]
out, err = self.successtest(command)
self.matchoutput(err, "Rack oy604 update in DSDB failed!", command)
self.matchoutput(err, "Update rack oy604 in DSDB failed, proceeding in AQDB.", command)
def test_245_dsdb_fail_verify(self):
command = ["show", "rack", "--rack", "oy604"]
out = self.commandtest(command)
self.matchoutput(out, "Comments: Important comments", command)
def test_250_verifyshowallcsv(self):
command = "show rack --all --format=csv"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "rack,ut3,bunker,zebrabucket.ut,b,3,,ut3", command)
self.matchoutput(out, "rack,ut8,building,ut,g,8", command)
self.matchoutput(out, "rack,ut9,bunker,bucket2.ut,h,9", command)
self.matchoutput(out, "rack,np997,building,np,xx,77", command)
self.matchoutput(out, "rack,np998,building,np,vv,66", command)
self.matchoutput(out, "rack,np999,building,np,zz,a", command)
def test_255_verifyut3plenary(self):
command = "cat --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"rack/name" = "ut3";', command)
self.matchoutput(out, '"rack/row" = "b";', command)
self.matchoutput(out, '"rack/column" = "3";', command)
def test_300_verifyut8plenary(self):
command = "cat --machine ut8s02p1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"rack/name" = "ut8";', command)
self.matchoutput(out, '"rack/row" = "g";', command)
self.matchoutput(out, '"rack/column" = "8";', command)
def test_355_verifyut9plenary(self):
command = "cat --machine ut9s03p1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"rack/name" = "ut9";', command)
self.matchoutput(out, '"rack/row" = "h";', command)
self.matchoutput(out, '"rack/column" = "9";', command)
self.matchoutput(out, '"sysloc/room" = "utroom2";', command)
self.matchoutput(out, '"sysloc/bunker" = "bucket2.ut";', command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateRack)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | 4,033,490,638,004,429,000 | 41.77381 | 125 | 0.593376 | false |
team-vigir/vigir_behaviors | behaviors/vigir_behavior_grasp_object/src/vigir_behavior_grasp_object/grasp_object_sm.py | 1 | 16228 | #!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_grasp_object')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from flexbe_states.decision_state import DecisionState
from vigir_flexbe_states.get_template_grasp_state import GetTemplateGraspState
from flexbe_states.calculation_state import CalculationState
from vigir_flexbe_states.plan_endeffector_cartesian_waypoints_state import PlanEndeffectorCartesianWaypointsState
from vigir_flexbe_states.execute_trajectory_msg_state import ExecuteTrajectoryMsgState
from flexbe_states.log_state import LogState
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_flexbe_states.get_template_finger_config_state import GetTemplateFingerConfigState
from vigir_flexbe_states.hand_trajectory_state import HandTrajectoryState
from vigir_flexbe_states.finger_configuration_state import FingerConfigurationState
from flexbe_states.input_state import InputState
from vigir_flexbe_states.get_template_pregrasp_state import GetTemplatePregraspState
from vigir_flexbe_states.plan_endeffector_pose_state import PlanEndeffectorPoseState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import rospy
from moveit_msgs.msg import Grasp, GripperTranslation
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Vector3Stamped, Vector3
from std_msgs.msg import Header
# [/MANUAL_IMPORT]
'''
Created on Fri Feb 27 2015
@author: Philipp Schillinger
'''
class GraspObjectSM(Behavior):
'''
Behavior to perform grasping. Robot has to be in range of the object.
'''
def __init__(self):
super(GraspObjectSM, self).__init__()
self.name = 'Grasp Object'
# parameters of this behavior
self.add_parameter('hand_type', 'robotiq')
self.add_parameter('hand_side', 'left')
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
# O 866 352 /Go_to_Grasp
# Need to adjust pregrasp pose for new grasp.
# O 452 33
# Close fingers to avoid collisions and reduce risk of damage when colliding
# O 496 391 /Perform_Grasp
# Go back to pregrasp pose first
# O 699 12 /Go_to_Grasp
# Allow operator to precisely align the template.
def create(self):
arm_controller = ExecuteTrajectoryMsgState.CONTROLLER_LEFT_ARM if self.hand_side == 'left' else ExecuteTrajectoryMsgState.CONTROLLER_RIGHT_ARM
# x:1033 y:440, x:359 y:535
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'template_id'])
_state_machine.userdata.hand_side = self.hand_side
_state_machine.userdata.grasp_preference = 0
_state_machine.userdata.template_id = None # provide or request
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:819 y:235, x:305 y:73
_sm_go_to_pregrasp_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference', 'pregrasp_pose'])
with _sm_go_to_pregrasp_0:
# x:27 y:68
OperatableStateMachine.add('Get_Pregrasp_Info',
GetTemplatePregraspState(),
transitions={'done': 'Plan_To_Pregrasp_Pose', 'failed': 'failed', 'not_available': 'Inform_Pregrasp_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High, 'not_available': Autonomy.High},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'pre_grasp': 'pregrasp_pose'})
# x:269 y:153
OperatableStateMachine.add('Inform_Pregrasp_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Low})
# x:537 y:228
OperatableStateMachine.add('Move_To_Pregrasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'finished', 'failed': 'Decide_Which_Pregrasp'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:25 y:328
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'Get_Pregrasp_Info'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:266 y:228
OperatableStateMachine.add('Plan_To_Pregrasp_Pose',
PlanEndeffectorPoseState(ignore_collisions=False, include_torso=False, allowed_collisions=[], planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_To_Pregrasp_Pose', 'failed': 'Decide_Which_Pregrasp'},
autonomy={'planned': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_pose': 'pregrasp_pose', 'hand': 'hand_side', 'joint_trajectory': 'joint_trajectory'})
# x:266 y:327
OperatableStateMachine.add('Decide_Which_Pregrasp',
OperatorDecisionState(outcomes=["same", "next"], hint='Try the same pregrasp or the next one?', suggestion='same'),
transitions={'same': 'Get_Pregrasp_Info', 'next': 'Increase_Preference_Index'},
autonomy={'same': Autonomy.High, 'next': Autonomy.High})
# x:110 y:343, x:533 y:169, x:728 y:40
_sm_perform_grasp_1 = OperatableStateMachine(outcomes=['finished', 'failed', 'next'], input_keys=['hand_side', 'grasp_preference', 'template_id', 'pregrasp_pose'], output_keys=['grasp_preference'])
with _sm_perform_grasp_1:
# x:68 y:76
OperatableStateMachine.add('Get_Finger_Configuration',
GetTemplateFingerConfigState(),
transitions={'done': 'Close_Fingers', 'failed': 'failed', 'not_available': 'Inform_Closing_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High, 'not_available': Autonomy.High},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'finger_config': 'finger_config'})
# x:293 y:328
OperatableStateMachine.add('Convert_Waypoints',
CalculationState(calculation=lambda msg: [msg.pose]),
transitions={'done': 'Plan_Back_To_Pregrasp'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'pregrasp_pose', 'output_value': 'pregrasp_waypoints'})
# x:496 y:328
OperatableStateMachine.add('Plan_Back_To_Pregrasp',
PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_Back_To_Pregrasp_Pose', 'incomplete': 'Move_Back_To_Pregrasp_Pose', 'failed': 'failed'},
autonomy={'planned': Autonomy.High, 'incomplete': Autonomy.High, 'failed': Autonomy.Low},
remapping={'waypoints': 'pregrasp_waypoints', 'hand': 'hand_side', 'frame_id': 'pregrasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})
# x:662 y:228
OperatableStateMachine.add('Move_Back_To_Pregrasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'Increase_Preference_Index', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:296 y:228
OperatableStateMachine.add('Extract_Frame_Id',
CalculationState(calculation=lambda pose: pose.header.frame_id),
transitions={'done': 'Convert_Waypoints'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'pregrasp_pose', 'output_value': 'pregrasp_frame_id'})
# x:673 y:128
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'next'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:81 y:228
OperatableStateMachine.add('Close_Fingers',
HandTrajectoryState(hand_type=self.hand_type),
transitions={'done': 'finished', 'failed': 'Extract_Frame_Id'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
remapping={'finger_trajectory': 'finger_config', 'hand_side': 'hand_side'})
# x:490 y:75
OperatableStateMachine.add('Inform_Closing_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:1041 y:56, x:257 y:85, x:1035 y:196
_sm_go_to_grasp_2 = OperatableStateMachine(outcomes=['finished', 'failed', 'again'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference'])
with _sm_go_to_grasp_2:
# x:33 y:49
OperatableStateMachine.add('Get_Grasp_Info',
GetTemplateGraspState(),
transitions={'done': 'Extract_Frame_Id', 'failed': 'failed', 'not_available': 'Inform_Grasp_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Low, 'not_available': Autonomy.Low},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'grasp': 'grasp_pose'})
# x:40 y:293
OperatableStateMachine.add('Convert_Waypoints',
CalculationState(calculation=lambda msg: [msg.pose]),
transitions={'done': 'Plan_To_Grasp'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_waypoints'})
# x:242 y:292
OperatableStateMachine.add('Plan_To_Grasp',
PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_To_Grasp_Pose', 'incomplete': 'Move_To_Grasp_Pose', 'failed': 'Decide_Which_Grasp'},
autonomy={'planned': Autonomy.Low, 'incomplete': Autonomy.High, 'failed': Autonomy.High},
remapping={'waypoints': 'grasp_waypoints', 'hand': 'hand_side', 'frame_id': 'grasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})
# x:494 y:175
OperatableStateMachine.add('Move_To_Grasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'Optional_Template_Adjustment', 'failed': 'Decide_Which_Grasp'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Low},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:226 y:177
OperatableStateMachine.add('Inform_Grasp_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:970 y:294
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'again'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:41 y:178
OperatableStateMachine.add('Extract_Frame_Id',
CalculationState(calculation=lambda pose: pose.header.frame_id),
transitions={'done': 'Convert_Waypoints'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_frame_id'})
# x:727 y:50
OperatableStateMachine.add('Optional_Template_Adjustment',
OperatorDecisionState(outcomes=["grasp", "pregrasp", "skip"], hint="Consider adjusting the template's pose", suggestion="skip"),
transitions={'grasp': 'Get_Grasp_Info', 'pregrasp': 'again', 'skip': 'finished'},
autonomy={'grasp': Autonomy.Full, 'pregrasp': Autonomy.Full, 'skip': Autonomy.High})
# x:754 y:294
OperatableStateMachine.add('Decide_Which_Grasp',
OperatorDecisionState(outcomes=["same", "next"], hint='Try the same grasp or the next one?', suggestion='same'),
transitions={'same': 'Optional_Template_Adjustment', 'next': 'Increase_Preference_Index'},
autonomy={'same': Autonomy.High, 'next': Autonomy.High})
with _state_machine:
# x:24 y:78
OperatableStateMachine.add('Decide_Request_Template',
DecisionState(outcomes=['request', 'continue'], conditions=lambda x: 'continue' if x is not None else 'request'),
transitions={'request': 'Request_Template', 'continue': 'Go_to_Pregrasp'},
autonomy={'request': Autonomy.Low, 'continue': Autonomy.Off},
remapping={'input_value': 'template_id'})
# x:849 y:224
OperatableStateMachine.add('Go_to_Grasp',
_sm_go_to_grasp_2,
transitions={'finished': 'Perform_Grasp', 'failed': 'Grasp_Manually', 'again': 'Close_Fingers'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'again': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id'})
# x:845 y:81
OperatableStateMachine.add('Perform_Grasp',
_sm_perform_grasp_1,
transitions={'finished': 'finished', 'failed': 'Grasp_Manually', 'next': 'Close_Fingers'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'next': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id', 'pregrasp_pose': 'pregrasp_pose'})
# x:506 y:228
OperatableStateMachine.add('Open_Fingers',
FingerConfigurationState(hand_type=self.hand_type, configuration=0.0),
transitions={'done': 'Go_to_Grasp', 'failed': 'Grasp_Manually'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
remapping={'hand_side': 'hand_side'})
# x:506 y:84
OperatableStateMachine.add('Close_Fingers',
FingerConfigurationState(hand_type=self.hand_type, configuration=1.0),
transitions={'done': 'Go_to_Pregrasp', 'failed': 'Grasp_Manually'},
autonomy={'done': Autonomy.High, 'failed': Autonomy.High},
remapping={'hand_side': 'hand_side'})
# x:324 y:428
OperatableStateMachine.add('Grasp_Manually',
OperatorDecisionState(outcomes=["fingers_closed", "abort"], hint="Grasp the object manually, continue when fingers are closed.", suggestion=None),
transitions={'fingers_closed': 'finished', 'abort': 'failed'},
autonomy={'fingers_closed': Autonomy.Full, 'abort': Autonomy.Full})
# x:34 y:178
OperatableStateMachine.add('Request_Template',
InputState(request=InputState.SELECTED_OBJECT_ID, message="Specify target template"),
transitions={'received': 'Go_to_Pregrasp', 'aborted': 'Grasp_Manually', 'no_connection': 'Grasp_Manually', 'data_error': 'Grasp_Manually'},
autonomy={'received': Autonomy.Low, 'aborted': Autonomy.High, 'no_connection': Autonomy.Low, 'data_error': Autonomy.Low},
remapping={'data': 'template_id'})
# x:236 y:122
OperatableStateMachine.add('Go_to_Pregrasp',
_sm_go_to_pregrasp_0,
transitions={'finished': 'Open_Fingers', 'failed': 'Grasp_Manually'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id', 'pregrasp_pose': 'pregrasp_pose'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| bsd-3-clause | 8,565,125,616,860,684,000 | 49.397516 | 215 | 0.670816 | false |
sam-tsai/django-old | django/test/utils.py | 1 | 3206 | import sys, time, os
from django.conf import settings
from django.core import mail
from django.core.mail.backends import locmem
from django.db import DEFAULT_DB_ALIAS
from django.test import signals
from django.template import Template
from django.utils.translation import deactivate
from django.utils.unittest import skipIf
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, basestring):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
value = self[key]
except KeyError:
return False
return True
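# Added note: the test client exposes a ContextList as response.context when
# several templates were rendered, so e.g. response.context['form'] returns the
# first 'form' found across the stacked contexts (key name is illustrative).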
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
signals.template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template.original_render = Template._render
Template._render = instrumented_test_render
mail.original_SMTPConnection = mail.SMTPConnection
mail.SMTPConnection = locmem.EmailBackend
mail.original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template.original_render
del Template.original_render
mail.SMTPConnection = mail.original_SMTPConnection
del mail.original_SMTPConnection
settings.EMAIL_BACKEND = mail.original_email_backend
del mail.original_email_backend
del mail.outbox
def get_runner(settings):
test_path = settings.TEST_RUNNER.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
def skipIfDBEngine(engine, reason=None):
"""
Decorator to skip tests on a given database engine.
Note that you can pass a single engine or an iterable here
"""
if not reason:
reason = "not supported on this database"
settings_engine = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']
if isinstance(engine, basestring):
return skipIf(settings_engine == engine, reason)
return skipIf(settings_engine in engine, reason)
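# Usage sketch (added comment; the engine path and test name are illustrative):
#
#   @skipIfDBEngine('django.db.backends.sqlite3',
#                   reason="sqlite3 cannot run this concurrency test")
#   def test_concurrent_updates(self):
#       ...
#
# An iterable of engine strings may be passed to skip several backends at once.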
| bsd-3-clause | 2,068,668,929,728,630,300 | 31.06 | 79 | 0.682782 | false |
piotrmaslanka/vanad | interfaces/vanad.py | 1 | 7505 | from __future__ import division
from socket import socket, AF_INET, SOCK_STREAM
from time import time
from select import select
from struct import pack, unpack
from threading import Lock
RQT_GET = 0x00
RQT_ASSIGN = 0x01
RQT_DELETE = 0x02
def REQ_to_bytes(request, tablespace, key, value):
return pack('!BBLL', request, tablespace, len(key), len(value)) + key + value
def GET_to_bytes(tablespace, key):
return REQ_to_bytes(RQT_GET, tablespace, key, '')
def ASSIGN_to_bytes(tablespace, key, value):
return REQ_to_bytes(RQT_ASSIGN, tablespace, key, value)
def DELETE_to_bytes(tablespace, key):
return REQ_to_bytes(RQT_DELETE, tablespace, key, '')
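# Wire-format note (added comment; byte values shown are illustrative): every
# request is a '!BBLL' header -- request type, tablespace, key length, value
# length -- followed by the raw key and value bytes. For example
# GET_to_bytes(0, 'user:1') produces
#   '\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00' + 'user:1'
# i.e. RQT_GET, tablespace 0, a 6-byte key and an empty value.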
def scan_frame(frame):
"""
    Scans a Vanad server reply frame and checks whether it is complete.
    If this is not yet a valid frame, it will raise an exception of
    undefined type and arguments.
    Will return the parsed values if this is a valid frame.
@return: tuple (int resultcode, bytearray data)
"""
# Unzip the header. Will throw if not sufficient bytes there
resultcode, data_len = unpack('!BL', str(frame[:5]))
# Check if frame is OK with length, if not - throw Exception
if len(frame) != 5 + data_len: raise Exception
# Extract data and rest of the data
data = frame[5:5+data_len]
return resultcode, data
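# Reply-frame note (added comment): the server answers with a '!BL' header --
# result code and payload length -- followed by the payload itself. scan_frame
# deliberately raises while the buffer is shorter than 5 + data_len bytes and
# returns (resultcode, data) once the frame is complete; the caller maps result
# code 0x01 (and empty payloads) to None.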
class VanadConnection(object):
"""
Class that represents a connection to a Vanad database
Will autoreconnect upon detecting socket lossage and repeat the query, as needed
Will behave smoothly even if user orders a query in the middle of database's
restart.
Will connect only if there'a need to do so.
If database is reliably down for longer periods of time, this WILL HANG!
"""
def __init__(self, address, connect_timeout=4, txrx_timeout=4, eo_timeout=8):
"""
Connect to a remote database.
@type address: tuple of (str address, int port)
@param address: SOCK_STREAM-compatible address of target database
@type connect_timeout: int
@param connect_timeout: timeout in seconds that will be used during
connecting to database
@type txrx_timeout: int
@param txrx_timeout: timeout for send/recv operations
@type eo_timeout: dont-care
@param eo_timeout: supported for legacy applications. dont-care.
"""
self.lock = Lock()
self.connect_timeout = connect_timeout
self.txrx_timeout = txrx_timeout
self.remote_address = address
self.connected = False
self.last_activity = 0 # an int with time() of last activity
self.socket = None # a socket.socket object will be here
self.default_tablespace = 0 # default tablespace
def __shut_sock(self):
try:
self.socket.close()
except:
pass
self.socket = None
self.connected = False
def __ensure_connected(self, force_reconnect=False):
"""PRIVATE METHOD.
        Ensures that the connection to the database is up.
If it isn't, it will make it so.
If it can't be done, it will hang."""
if time() - self.last_activity > 3: # Connection down
self.__shut_sock()
while (not self.connected) or force_reconnect: # Assure that you are connected
# we don't close our sockets here, because closing a socket might take a while
# we just plainly discard it. Mail me if you got a better idea.
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.settimeout(self.connect_timeout)
try:
self.socket.connect(self.remote_address)
except: # timeout or active denial
try:
self.socket.close()
except:
pass
self.socket = None
            else:
                # clear the flag once a fresh connection has been made,
                # otherwise force_reconnect would keep this loop running forever
                force_reconnect = False
                self.connected = True
                self.last_activity = time()
                self.socket.settimeout(self.txrx_timeout)
def set_default_tablespace(self, id):
"""
Sets a new tablespace as default one
@type id: int in (0..255)
@param id: number of new default tablespace
"""
self.default_tablespace = id
def __transact(self, to_send):
"""
Transacts with the database. Will return value that got returned.
Will raise exception if it could not be completed, and should be retried.
"""
# Send now
self.socket.sendall(to_send)
# Now, wait for reception
recvdata = bytearray()
while True:
k = self.socket.recv(1024)
if len(k) == 0: raise Exception # server closed connection
recvdata.extend(k)
try:
result, value = scan_frame(recvdata)
except: # Frame not ready yet
pass
else: # Frame completed
break
self.last_activity = time() # Note the activity
if result == 0x01: return None # Not found for GET's
if len(value) == 0: return None # None and empty string have same meaning
return value
def get(self, key, tablespace=None):
"""
Fetches a record from database.
@type key: str
@param key: Key to fetch with
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to fetch from. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
f = self.__transact(GET_to_bytes(tablespace, key))
self.lock.release()
return f
except:
self.__ensure_connected(force_reconnect=True)
def assign(self, key, value, tablespace=None):
"""
Writes a record to database
@type key: str
@param key: Key to write
@type value: str
@param value: Value to write
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to write to. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
self.__transact(ASSIGN_to_bytes(tablespace, key, value))
self.lock.release()
return
except:
self.__ensure_connected(force_reconnect=True)
def delete(self, key, tablespace=None):
"""
Deletes a record from database
@type key: str
@param key: Key to delete
@type tablespace: int in (0..255), or None
@param tablespace: number of tablespace to write to. If None,
default tablespace will be used
"""
self.lock.acquire()
if tablespace == None: tablespace = self.default_tablespace
self.__ensure_connected()
while True:
try:
self.__transact(DELETE_to_bytes(tablespace, key))
self.lock.release()
return
except:
self.__ensure_connected(force_reconnect=True)
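# Minimal usage sketch (added comment, not part of the original module; the
# address, tablespace and keys are hypothetical):
#
#   conn = VanadConnection(('127.0.0.1', 15000))
#   conn.set_default_tablespace(0)
#   conn.assign('user:1', 'piotr')
#   print conn.get('user:1')    # -> 'piotr'
#   conn.delete('user:1')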
| gpl-3.0 | -2,702,130,170,785,214,500 | 31.489177 | 90 | 0.582678 | false |
fboender/miniorganizer | src/lib/kiwi/db/sqlalch.py | 1 | 6532 | ##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Ali Afshar <[email protected]>
## Johan Dahlin <[email protected]>
##
"""
SQLAlchemy integration for Kiwi
"""
from sqlalchemy import and_, or_
from kiwi.db.query import NumberQueryState, StringQueryState, \
DateQueryState, DateIntervalQueryState, QueryExecuter, \
NumberIntervalQueryState
from kiwi.interfaces import ISearchFilter
class SQLAlchemyQueryExecuter(QueryExecuter):
def __init__(self, session):
QueryExecuter.__init__(self)
self.session = session
self.table = None
self._query_callbacks = []
self._filter_query_callbacks = {}
self._query = self._default_query
self._full_text_indexes = {}
#
# Public API
#
def set_table(self, table):
"""
        Sets the SQLAlchemy table for this executer
        @param table: an SQLAlchemy Table (or mapped class exposing .c)
"""
self.table = table
def add_query_callback(self, callback):
"""
Adds a generic query callback
@param callback: a callable
"""
if not callable(callback):
raise TypeError
self._query_callbacks.append(callback)
def add_filter_query_callback(self, search_filter, callback):
"""
Adds a query callback for the filter search_filter
@param search_filter: a search filter
@param callback: a callable
"""
if not ISearchFilter.providedBy(search_filter):
raise TypeError
if not callable(callback):
raise TypeError
l = self._filter_query_callbacks.setdefault(search_filter, [])
l.append(callback)
def set_query(self, callback):
"""
Overrides the default query mechanism.
        @param callback: a callable which will take one argument:
            the query to execute
"""
if callback is None:
callback = self._default_query
elif not callable(callback):
raise TypeError
self._query = callback
#
# QueryBuilder
#
def search(self, states):
"""
Execute a search.
        @param states: a list of query states describing the active search filters
"""
if self.table is None:
raise ValueError("table cannot be None")
table = self.table
queries = []
for state in states:
search_filter = state.filter
assert state.filter
# Column query
if search_filter in self._columns:
query = self._construct_state_query(
table, state, self._columns[search_filter])
if query:
queries.append(query)
# Custom per filter/state query.
elif search_filter in self._filter_query_callbacks:
for callback in self._filter_query_callbacks[search_filter]:
query = callback(state)
if query:
queries.append(query)
else:
if (self._query == self._default_query and
not self._query_callbacks):
raise ValueError(
"You need to add a search column or a query callback "
"for filter %s" % (search_filter))
for callback in self._query_callbacks:
query = callback(states)
if query:
queries.append(query)
if queries:
query = and_(*queries)
else:
query = None
result = self._query(query)
return result
#
# Private
#
def _default_query(self, query):
return self.session.query(self.table).select(query)
def _construct_state_query(self, table, state, columns):
queries = []
for column in columns:
query = None
table_field = getattr(table.c, column)
if isinstance(state, NumberQueryState):
query = self._parse_number_state(state, table_field)
elif isinstance(state, NumberIntervalQueryState):
query = self._parse_number_interval_state(state, table_field)
elif isinstance(state, StringQueryState):
query = self._parse_string_state(state, table_field)
elif isinstance(state, DateQueryState):
query = self._parse_date_state(state, table_field)
elif isinstance(state, DateIntervalQueryState):
query = self._parse_date_interval_state(state, table_field)
else:
raise NotImplementedError(state.__class__.__name__)
if query:
queries.append(query)
if queries:
return or_(*queries)
def _parse_number_state(self, state, table_field):
if state.value is not None:
return table_field == state.value
def _parse_number_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return and_(*queries)
def _parse_string_state(self, state, table_field):
if state.text is not None:
text = '%%%s%%' % state.text.lower()
return table_field.like(text)
def _parse_date_state(self, state, table_field):
if state.date:
return table_field == state.date
def _parse_date_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return and_(*queries)
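# Illustrative sketch (not part of the original module): a minimal way this
# executer could be wired up. `session`, `person_table` and `states` are
# hypothetical placeholders; `states` would normally come from the UI search
# filters and only methods defined above are used.
#
#     executer = SQLAlchemyQueryExecuter(session)
#     executer.set_table(person_table)
#     executer.add_query_callback(lambda states: None)  # optional extra query
#     results = executer.search(states)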
| gpl-3.0 | -430,581,801,005,307,300 | 31.497512 | 78 | 0.58068 | false |
arthurdejong/python-pskc | pskc/exceptions.py | 1 | 1535 | # exceptions.py - collection of pskc exceptions
# coding: utf-8
#
# Copyright (C) 2014 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of exceptions."""
class PSKCError(Exception):
"""General top-level exception."""
pass
class ParseError(PSKCError):
"""Something went wrong with parsing the PSKC file.
Either the file is invalid XML or required elements or attributes are
missing.
"""
pass
class EncryptionError(PSKCError):
"""There was a problem encrypting the value."""
pass
class DecryptionError(PSKCError):
"""There was a problem decrypting the value.
    The encrypted value was available but something went wrong with decrypting
it.
"""
pass
class KeyDerivationError(PSKCError):
"""There was a problem performing the key derivation."""
pass
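# Illustrative sketch (assumption, not part of the original module): callers
# would typically catch a specific subclass, or PSKCError to handle any of
# them. The PSKC constructor call below is hypothetical usage.
#
#     try:
#         keys = PSKC('keys.pskcxml')
#     except ParseError as e:
#         print('invalid PSKC file: %s' % e)
#     except PSKCError as e:
#         print('PSKC error: %s' % e)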
| lgpl-2.1 | 9,194,385,363,497,306,000 | 25.016949 | 77 | 0.72899 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/lib/tests/test_ufunclike.py | 1 | 1984 | from numpy.testing import *
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing.decorators import deprecated
class TestUfunclike(TestCase):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([True, False, False, False, False, False])
res = ufl.isposinf(a)
assert_equal(res, tgt)
res = ufl.isposinf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([False, True, False, False, False, False])
res = ufl.isneginf(a)
assert_equal(res, tgt)
res = ufl.isneginf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
tgt = nx.array([[ 1., 1., 1., 1.], [-1., -1., -1., -1.]])
res = ufl.fix(a)
assert_equal(res, tgt)
res = ufl.fix(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
assert_equal(ufl.fix(3.14), 3)
def test_fix_with_subclass(self):
class MyArray(nx.ndarray):
def __new__(cls, data, metadata=None):
res = nx.array(data, copy=True).view(cls)
res.metadata = metadata
return res
def __array_wrap__(self, obj, context=None):
obj.metadata = self.metadata
return obj
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
assert_array_equal(f, nx.array([1,-1]))
assert_(isinstance(f, MyArray))
assert_equal(f.metadata, 'foo')
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 8,864,913,563,870,717,000 | 31.066667 | 70 | 0.513609 | false |
celliern/triflow | triflow/core/simulation.py | 1 | 15719 | #!/usr/bin/env python
# coding=utf8
import inspect
import logging
import pprint
import time
import warnings
from collections import namedtuple
from uuid import uuid1
import pendulum
import streamz
import tqdm
from numpy import isclose
from . import schemes
from ..plugins.container import TriflowContainer
logging.getLogger(__name__).addHandler(logging.NullHandler())
logging = logging.getLogger(__name__)
def is_interactive():
import __main__ as main
return not hasattr(main, '__file__')
tqdm = tqdm.tqdm_notebook if is_interactive() else tqdm.tqdm
class Timer:
def __init__(self, last, total):
self.last = last
self.total = total
def __repr__(self):
repr = """last: {last}
total: {total}"""
return repr.format(last=(pendulum.now()
.subtract(
seconds=self.last)
.diff()),
total=(pendulum.now()
.subtract(
seconds=self.total)
.diff()))
def null_hook(t, fields, pars):
return fields, pars
PostProcess = namedtuple(
"PostProcess", ["name", "function", "description"])
class Simulation(object):
"""High level container used to run simulation build on triflow Model.
This object is an iterable which will yield every time step until the
    parameter 'tmax' is reached, if provided.
    By default, the solver uses a 6th order ROW solver, an implicit method
with integrated time-stepping.
Parameters
----------
model : triflow.Model
        Contains the finite difference approximation and routines of the
        dynamical system
fields : triflow.BaseFields or dict (any mappable)
triflow container or mappable filled with initial conditions
parameters : dict
physical parameters of the simulation
dt : float
time stepping for output. if time_stepping is False, the internal
time stepping will be the same.
t : float, optional, default 0.
initial time
tmax : float, optional, default None
        Control the end of the simulation. If None (the default), the
        computation will continue until interrupted by the user (using Ctrl-C
or a SIGTERM signal).
id : None, optional
Name of the simulation. A 2 word slug will be generated if not
provided.
hook : callable, optional, default null_hook.
        Any callable taking the actual time, fields and parameters and
        returning modified fields and parameters.
        Will be called every internal time step and can be used to include
        time-dependent or conditional parameters, boundary conditions...
The default null_hook has no impact on the computation.
scheme : callable, optional, default triflow.schemes.RODASPR
        A callable object which takes the simulation state and returns
        the next step.
        Its signature is scheme.__call__(fields, t, dt, pars, hook)
        and it should return the next time and the updated fields.
        It takes the model and extra positional and named arguments.
time_stepping : boolean, default True
        Indicate if the time step is controlled by an algorithm dependent on
the temporal scheme (see the doc on time stepping for extra info).
**kwargs
extra arguments passed to the scheme.
Attributes
----------
dt : float
output time step
fields : triflow.Fields
triflow container filled with actual data
i : int
actual iteration
id : str
name of the simulation
model : triflow.Model
triflow Model used in the simulation
parameters : dict
physical parameters of the simulation
status : str
        status of the simulation, one of the following:
('created', 'running', 'finished', 'failed')
t : float
actual time
tmax : float or None, default None
stopping time of the simulation. Not stopping if set to None.
Properties
----------
post_processes: list of triflow.core.simulation.PostProcess
      contains all the post-processing functions attached to the simulation.
    container: triflow.TriflowContainer
      gives access to the attached container, if any.
    timer: triflow.core.simulation.Timer
      returns the cpu time of the previous step and the total running time of
      the simulation.
    stream: streamz.Stream
      Streamz starting point, fed by the simulation state after each
      time_step. This interface is used for post-processing, saving the data
      on disk by the TriflowContainer and displaying the fields in real-time.
Examples
--------
>>> import numpy as np
>>> import triflow
>>> model = triflow.Model(["k1 * dxxU",
... "k2 * dxxV"],
... ["U", "V"],
... ["k1", "k2"])
>>> x = np.linspace(0, 100, 1000, endpoint=False)
>>> U = np.cos(x * 2 * np.pi / 100)
>>> V = np.sin(x * 2 * np.pi / 100)
>>> fields = model.fields_template(x=x, U=U, V=V)
>>> pars = {'k1': 1, 'k2': 1, 'periodic': True}
>>> simulation = triflow.Simulation(model, fields, pars, dt=5., tmax=50.)
>>> for t, fields in simulation:
... pass
>>> print(t)
50.0
""" # noqa
def __init__(self, model, fields, parameters, dt, t=0, tmax=None,
id=None, hook=null_hook,
scheme=schemes.RODASPR,
time_stepping=True, **kwargs):
def intersection_kwargs(kwargs, function):
"""Inspect the function signature to identify the relevant keys
in a dictionary of named parameters.
"""
func_signature = inspect.signature(function)
func_parameters = func_signature.parameters
kwargs = {key: value
for key, value
in kwargs.items() if key in func_parameters}
return kwargs
kwargs["time_stepping"] = time_stepping
self.id = str(uuid1())[:6] if not id else id
self.model = model
self.parameters = parameters
self.fields = model.fields_template(**fields)
self.t = t
self.user_dt = self.dt = dt
self.tmax = tmax
self.i = 0
self._stream = streamz.Stream()
self._pprocesses = []
self._scheme = scheme(model,
**intersection_kwargs(kwargs,
scheme.__init__))
if (time_stepping and
self._scheme not in [schemes.RODASPR,
schemes.ROS3PRL,
schemes.ROS3PRw]):
self._scheme = schemes.time_stepping(
self._scheme,
**intersection_kwargs(kwargs,
schemes.time_stepping))
self.status = 'created'
self._total_running = 0
self._last_running = 0
self._created_timestamp = pendulum.now()
self._started_timestamp = None
self._last_timestamp = None
self._actual_timestamp = pendulum.now()
self._hook = hook
self._container = None
self._iterator = self.compute()
def _compute_one_step(self, t, fields, pars):
"""
Compute one step of the simulation, then update the timers.
"""
fields, pars = self._hook(t, fields, pars)
self.dt = (self.tmax - t
if self.tmax and (t + self.dt >= self.tmax)
else self.dt)
before_compute = time.process_time()
t, fields = self._scheme(t, fields, self.dt,
pars, hook=self._hook)
after_compute = time.process_time()
self._last_running = after_compute - before_compute
self._total_running += self._last_running
self._last_timestamp = self._actual_timestamp
self._actual_timestamp = pendulum.now()
return t, fields, pars
def compute(self):
"""Generator which yield the actual state of the system every dt.
Yields
------
tuple : t, fields
Actual time and updated fields container.
"""
fields = self.fields
t = self.t
pars = self.parameters
self._started_timestamp = pendulum.now()
self.stream.emit(self)
try:
while True:
t, fields, pars = self._compute_one_step(t, fields, pars)
self.i += 1
self.t = t
self.fields = fields
self.parameters = pars
for pprocess in self.post_processes:
pprocess.function(self)
self.stream.emit(self)
yield self.t, self.fields
if self.tmax and (isclose(self.t, self.tmax)):
self._end_simulation()
return
except RuntimeError:
self.status = 'failed'
raise
def _end_simulation(self):
if self.container:
self.container.flush()
self.container.merge()
def run(self, progress=True, verbose=False):
"""Compute all steps of the simulation. Be careful: if tmax is not set,
        this function will result in an infinite loop.
Returns
-------
(t, fields):
last time and result fields.
"""
        total_iter = int(self.tmax // self.user_dt) if self.tmax else None
log = logging.info if verbose else logging.debug
if progress:
            with tqdm(initial=(self.i if total_iter is None or self.i < total_iter
                               else total_iter),
                      total=total_iter) as pbar:
for t, fields in self:
pbar.update(1)
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
for t, fields in self:
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
def __repr__(self):
repr = """{simulation_name:=^30}
created: {created_date}
started: {started_date}
last: {last_date}
time: {t:g}
iteration: {iter:g}
last step: {step_time}
total time: {running_time}
Physical parameters
-------------------
{parameters}
Hook function
-------------
{hook_source}
=========== Model ===========
{model_repr}"""
repr = repr.format(simulation_name=" %s " % self.id,
parameters="\n\t".join(
[("%s:" % key).ljust(12) +
pprint.pformat(value)
for key, value
in self.parameters.items()]),
t=self.t,
iter=self.i,
model_repr=self.model,
hook_source=inspect.getsource(self._hook),
step_time=(None if not self._last_running else
pendulum.now()
.subtract(
seconds=self._last_running)
.diff()),
running_time=(pendulum.now()
.subtract(
seconds=self._total_running)
.diff()),
created_date=(self._created_timestamp
.to_cookie_string()),
started_date=(self._started_timestamp
.to_cookie_string()
if self._started_timestamp
else "None"),
last_date=(self._last_timestamp
.to_cookie_string()
if self._last_timestamp
else "None"))
return repr
def attach_container(self, path=None, save="all",
mode="w", nbuffer=50, force=False):
"""add a Container to the simulation which allows some
persistance to the simulation.
Parameters
----------
path : str or None (default: None)
path for the container. If None (the default), the data lives only
in memory (and are available with `simulation.container`)
mode : str, optional
"a" or "w" (default "w")
save : str, optional
"all" will save every time-step,
"last" will only get the last time step
nbuffer : int, optional
wait until nbuffer data in the Queue before save on disk.
force : bool, optional (default False)
if True, remove the target folder if not empty. if False, raise an
error.
"""
self._container = TriflowContainer("%s/%s" % (path, self.id)
if path else None,
save=save,
mode=mode, metadata=self.parameters,
force=force, nbuffer=nbuffer)
self._container.connect(self.stream)
return self._container
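    # Illustrative sketch (assumption, not from the original source): attaching
    # a container that keeps every time step on disk might look like
    #     simulation.attach_container(path='/tmp/triflow_runs', save='all')
    # after which the stored data stays reachable through `simulation.container`.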
@property
def post_processes(self):
return self._pprocesses
@property
def stream(self):
return self._stream
@property
def container(self):
return self._container
@property
def timer(self):
return Timer(self._last_running, self._total_running)
def add_post_process(self, name, post_process, description=""):
"""add a post-process
Parameters
----------
name : str
            name of the post-process
        post_process : callback (a function, a class with a __call__ method
            or a streamz.Stream).
            this callback has to accept the simulation state as parameter
            and return the modified simulation state.
            if a streamz.Stream is provided, it will be plugged in with the
            previous stream (and ultimately to the initial_stream). All these
            streams accept and return the simulation state.
        description : str, optional, Default is "".
            gives extra information about the post-processing
"""
self._pprocesses.append(PostProcess(name=name,
function=post_process,
description=description))
self._pprocesses[-1].function(self)
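    # Illustrative sketch (assumption, not from the original source): a
    # post-process receives the Simulation itself, so a running record of a
    # hypothetical field "U" could be attached like
    #     def mean_U(simul):
    #         simul.parameters['mean_U'] = simul.fields['U'].mean()
    #     simulation.add_post_process('mean_U', mean_U,
    #                                 description='store the mean of U')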
def remove_post_process(self, name):
"""remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
"""
self._pprocesses = [post_process
for post_process in self._pprocesses
if post_process.name != name]
def __iter__(self):
return self.compute()
def __next__(self):
return next(self._iterator)
| gpl-3.0 | 905,709,783,713,381,900 | 34.888128 | 79 | 0.531204 | false |
TiddlySpace/tiddlyspace | test/test_web_http_api.py | 1 | 2736 | """
Run through the socialusers API testing what's there.
Read the TESTS variable as documentation of
the capabilities of the API.
If you run this test file by itself, instead
of as a test it will produce a list of test
requests and some associated information.
"""
import os
from test.fixtures import make_test_env
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
import httplib2
import yaml
base_url = 'http://0.0.0.0:8080'
TESTS = {}
def setup_module(module):
global TESTS
make_test_env(module)
httplib2_intercept.install()
wsgi_intercept.add_wsgi_intercept('0.0.0.0', 8080, app_fn)
module.http = httplib2.Http()
TESTS = yaml.load(open('../test/httptest.yaml'))
def test_assert_response():
"""
Make sure our assertion tester is valid.
"""
response = {
'status': '200',
'location': 'http://example.com',
}
content = 'Hello World\n'
status = '200'
headers = {
'location': 'http://example.com',
}
expected = ['Hello']
assert_response(response, content, status, headers, expected)
EMPTY_TEST = {
'name': '',
'desc': '',
'method': 'GET',
'url': '',
'status': '200',
'request_headers': {},
'response_headers': {},
'expected': [],
'data': '',
}
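# Illustrative sketch (assumption): an entry in httptest.yaml overrides the
# defaults above, e.g.
#     - name: front page
#       desc: the space front page renders
#       url: /
#       expected:
#         - TiddlySpace
# Keys not listed in an entry fall back to the values in EMPTY_TEST.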
def test_the_TESTS():
"""
Run the entire TEST.
"""
for test_data in TESTS:
test = dict(EMPTY_TEST)
test.update(test_data)
yield test['name'], _run_test, test
def _run_test(test):
full_url = base_url + test['url']
if test['method'] == 'GET' or test['method'] == 'DELETE':
response, content = http.request(full_url, method=test['method'], headers=test['request_headers'])
else:
response, content = http.request(full_url, method=test['method'], headers=test['request_headers'],
body=test['data'].encode('UTF-8'))
assert_response(response, content, test['status'], headers=test['response_headers'], expected=test['expected'])
def assert_response(response, content, status, headers=None, expected=None):
if response['status'] == '500': print content
assert response['status'] == '%s' % status, (response, content)
if headers:
for header in headers:
assert response[header] == headers[header]
if expected:
for expect in expected:
assert expect.encode('UTF-8') in content
if __name__ == '__main__':
for test_data in TESTS:
test = dict(EMPTY_TEST)
test.update(test_data)
full_url = base_url + test['url']
print test['name']
print '%s %s' % (test['method'], full_url)
print
| bsd-3-clause | 3,999,731,388,525,409,000 | 25.823529 | 115 | 0.594298 | false |
BBCVisualJournalism/newsspec_7954 | source/data/uow/main.py | 1 | 1390 | import codecs
import os
from collections import OrderedDict
import math
import operator
f = codecs.open("scores.csv","r")
sports = {}
for line in f:
parts = line.strip().split(",")
sports[parts[0]] = parts[1:]
print(parts)
print(sports)
def get_sports_vec():
for k in sports.keys():
if k != "sport":
yield k, sports[k]
def load_test():
f = codecs.open("cw_games_test_cases.csv", "r")
case = 1
test_cases = []
for line in f:
parts = line.strip().split(",")
if case == 1:
case += 1
else:
test_cases.append(parts[0:])
case += 1
return test_cases
def cosim(v1, v2):
dot_product = sum(float(n1) * float(n2) for n1,n2 in zip(v1, v2) )
magnitude1 = math.sqrt (sum(float(n) ** 2 for n in v1))
magnitude2 = math.sqrt (sum(float(n) ** 2 for n in v2))
return dot_product / (magnitude1 * magnitude2)
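# Worked example (illustration only): for v1 = [1, 0] and v2 = [1, 1] the dot
# product is 1 and the magnitudes are 1 and sqrt(2), so cosim ~= 0.707.
# Identical directions give 1.0; orthogonal vectors give 0.0.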
def score_cases():
out = codecs.open("results.csv","w")
for case in load_test():
scores = {}
for k, vec in get_sports_vec():
scores[k] = cosim(case,vec)
sorted_scores = sorted(scores.items(), key=lambda x: x[1])
sorted_scores.reverse()
out.write("%s,%s,%s\n" % (sorted_scores[0][0], sorted_scores[1][0], sorted_scores[2][0]))
out.close()
score_cases()
| apache-2.0 | 6,265,075,724,225,608,000 | 25.254902 | 97 | 0.546763 | false |
mattdavis90/re-store-it | src/server.py | 1 | 9242 | import logging
import re
import os
import xmlrpclib
from DocXMLRPCServer import DocXMLRPCServer
from DocXMLRPCServer import DocXMLRPCRequestHandler
from mode import Mode
from helper import get_config
class Server(Mode):
def _initialise(self, args):
logging.debug('Starting server mode checks on config file')
config = get_config(args.config_file)
self._clients = {}
self._backup_location = ''
self._port = 9001
if config.has_option('server', 'backup_location'):
self._backup_location = config.get('server', 'backup_location')
if not os.path.isdir(self._backup_location):
logging.warn("Backup location '%s' does not exist, attempting to create it" % self._backup_location)
try:
os.makedirs(self._backup_location)
except:
raise RuntimeError('Could not create the requested backup location')
else:
raise RuntimeError('Backup location not specified in config file')
if not config.has_option('server', 'port'):
logging.warn('No port specified, using 9001')
else:
try:
self._port = int(config.get('server', 'port'))
except:
raise RuntimeError('Server port must be an integer')
for section in config.sections():
if not section == 'server':
logging.debug('Found a client: %s' % section)
if not config.has_option(section, 'artifacts'):
raise RuntimeError('Client sections require an artifacts option')
artifacts_string = config.get(section, 'artifacts')
artifacts = {}
if artifacts_string == '':
raise RuntimeError('Artifacts list cannot be empty')
for artifact in artifacts_string.split(','):
logging.debug('Found an artifact: %s' % artifact)
file_based = True
filename = ''
backup_command = ''
restore_command = ''
cleanup = False
versions = 1
interval = '1h'
if config.has_option(section, artifact + '_filename'):
filename = config.get(section, artifact + '_filename')
else:
raise RuntimeError("Artifacts must have at least a file specified. Error in client '%s'" % section)
if config.has_option(section, artifact + '_backup_command'):
file_based = False
backup_command = config.get(section, artifact + '_backup_command')
if config.has_option(section, artifact + '_restore_command'):
restore_command = config.get(section, artifact + '_restore_command')
else:
raise RuntimeError("A backup command was specified without a restore command. A restore command is required in client '%s', artifact '%s'" % (section, artifact))
if config.has_option(section, artifact + '_cleanup'):
tmp = config.get(section, artifact + '_cleanup')
if tmp.lower() == 'true':
cleanup = True
elif tmp.lower() == 'false':
cleanup = False
else:
raise RuntimeError("Invalid option for cleanup in client '%s', artifact '%s'" % (section, artifact))
if config.has_option(section, artifact + '_versions'):
try:
versions = int(config.get(section, artifact + '_versions'))
except:
raise RuntimeError("Version option must be an integer in client '%s', artifact '%s'" % (section, artifact))
if config.has_option(section, artifact + '_interval'):
interval = config.get(section, artifact + '_interval')
regex = "^(\d+w ?)?(\d+d ?)?(\d+h ?)?(\d+m ?)?(\d+s ?)?$"
if not re.search(regex, interval):
raise RuntimeError("Interval option must in valid timedelta format. e.g. 1w2d3h4m. In client '%s', artifact '%s'" % (section, artifact))
artifacts[artifact] = {
'file_based': file_based,
'filename': filename,
'backup_command': backup_command,
'restore_command': restore_command,
'cleanup': cleanup,
'versions': versions,
'interval': interval
}
self._clients[section] = artifacts
if not len(self._clients) > 0:
raise RuntimeError('No clients specified')
self._server = None
def _add_arguments(self):
self._parser.add_argument('config_file', metavar='CONFIGFILE')
def run(self):
logging.debug('Starting XMLRPC server')
self._server = DocXMLRPCServer(('0.0.0.0', self._port), logRequests=False)
self._server.register_instance(_XMLRPCServer(self._clients, self._backup_location))
self._server.serve_forever()
def stop(self):
logging.debug('Stopping XMLRPC Server')
if self._server != None:
self._server.shutdown()
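# Illustrative sketch (assumption, not from the original project): a config
# file matching the parsing logic in Server._initialise could look like
#
#     [server]
#     backup_location = /var/backups/re-store-it
#     port = 9001
#
#     [client1]
#     artifacts = website_db
#     website_db_filename = /tmp/website.dump
#     website_db_backup_command = pg_dump website > /tmp/website.dump
#     website_db_restore_command = psql website < /tmp/website.dump
#     website_db_versions = 5
#     website_db_interval = 1d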
class _XMLRPCServer(object):
def __init__(self, clients, backup_location):
self._clients = clients
self._backup_location = backup_location
def get_artifacts(self, client):
logging.info('Client %s - Requested artifacts' % client)
return self._clients.get(client, {})
def store_version(self, client, artifact, binary):
if not self._clients.get(client):
            raise RuntimeError('Client not found')
no_versions = self._clients[client][artifact]['versions']
backup_path = os.path.join(self._backup_location, client, artifact)
if not os.path.isdir(backup_path):
logging.warn("Client %s - Artifact %s - directory doesn't exist, attempting to create" % (client, artifact))
try:
os.makedirs(backup_path)
except:
logging.error('Client %s - Artifact %s - Could not create directory')
raise RuntimeError('Could not create backup directory for %s artifact %s' % (client, artifact))
backup_file = os.path.join(backup_path, 'version0')
versions = self.get_versions(client, artifact)
if len(versions) >= no_versions:
logging.debug('Client %s - Artifact %s - Removing old version' % (client, artifact))
os.unlink(versions[-1]['filepath'])
del versions[-1]
if len(versions) > 0:
for i in range(len(versions) - 1, -1, -1):
old_name = versions[i]['filename']
new_name = 'version%d' % (int(re.match('^.*(\d+)$', old_name).group(1)) + 1)
old_path = os.path.join(backup_path, old_name)
new_path = os.path.join(backup_path, new_name)
os.rename(old_path, new_path)
with open(backup_file, 'wb') as handle:
handle.write(binary.data)
logging.info('Client %s - Artifact %s - Stored new version' % (client, artifact))
return True
def get_version(self, client, artifact, version):
if not client in self._clients:
raise RuntimeError('Client not found')
if not artifact in self._clients[client]:
raise RuntimeError('Artifact not found')
filename = self._clients[client][artifact]['filename']
restore_command = self._clients[client][artifact]['restore_command']
if not version.startswith('version'):
version = 'version' + version
version_path = os.path.join(self._backup_location, client, artifact, version)
if os.path.isfile(version_path):
logging.info('Client %s - Downloaded artifact %s, %s' % (client, artifact, version))
with open(version_path, 'rb') as handle:
data = xmlrpclib.Binary(handle.read())
return (filename, restore_command, data)
else:
            raise RuntimeError('That version doesn\'t exist')
def get_versions(self, client, artifact):
logging.info('Client %s - Requested versions for artifact %s' % (client, artifact))
backup_path = os.path.join(self._backup_location, client, artifact)
versions = []
if os.path.isdir(backup_path):
backup_list = os.listdir(backup_path)
for backup in backup_list:
file_path = os.path.join(backup_path, backup)
modified = os.stat(file_path).st_mtime
versions.append({'filename': backup, 'filepath': file_path, 'modified': modified})
versions.sort(key=lambda itm: itm['modified'], reverse=True)
return versions | mit | -7,871,173,547,847,084,000 | 37.836134 | 189 | 0.543822 | false |
gencer/sentry | src/sentry/api/endpoints/group_events.py | 1 | 2698 | from __future__ import absolute_import
import six
from sentry import tagstore
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.paginator import DateTimePaginator
from sentry.models import Environment, Event, Group
from sentry.search.utils import parse_query
from sentry.utils.apidocs import scenario, attach_scenarios
from rest_framework.response import Response
from sentry.search.utils import InvalidQuery
from django.db.models import Q
@scenario('ListAvailableSamples')
def list_available_samples_scenario(runner):
group = Group.objects.filter(project=runner.default_project).first()
runner.request(method='GET', path='/issues/%s/events/' % group.id)
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
@attach_scenarios([list_available_samples_scenario])
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
events = Event.objects.filter(
group_id=group.id,
)
query = request.GET.get('query')
if query:
try:
query_kwargs = parse_query(group.project, query, request.user)
except InvalidQuery as exc:
return Response({'detail': six.text_type(exc)}, status=400)
if query_kwargs['query']:
q = Q(message__icontains=query_kwargs['query'])
if len(query) == 32:
q |= Q(event_id__exact=query_kwargs['query'])
events = events.filter(q)
if query_kwargs['tags']:
try:
environment_id = self._get_environment_id_from_request(
request, group.project.organization_id)
except Environment.DoesNotExist:
event_ids = []
else:
event_ids = tagstore.get_group_event_ids(
group.project_id, group.id, environment_id, query_kwargs['tags'])
if event_ids:
events = events.filter(
id__in=event_ids,
)
else:
events = events.none()
return self.paginate(
request=request,
queryset=events,
order_by='-datetime',
on_results=lambda x: serialize(x, request.user),
paginator_cls=DateTimePaginator,
)
| bsd-3-clause | 6,260,924,192,562,631,000 | 31.902439 | 89 | 0.588213 | false |
derekjchow/models | research/object_detection/core/target_assigner.py | 1 | 29856 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import shape_utils
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder, bcoder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
Assign a classification target of for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder,
negative_class_weight=negative_class_weight)
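# Illustrative sketch (not part of the original module): a typical use is to
# build an assigner and hand it anchors and groundtruth wrapped in BoxLists.
# `anchor_boxes` and `gt_boxes` below are hypothetical [N, 4] / [M, 4] tensors.
#
#     assigner = create_target_assigner('FasterRCNN', 'proposal')
#     cls_targets, cls_weights, reg_targets, reg_weights, match = assigner.assign(
#         box_list.BoxList(anchor_boxes), box_list.BoxList(gt_boxes))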
def batch_assign_targets(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
This differences between batch_assign_confidences and batch_assign_targets:
- 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
tensor (high-dimensional) targets. 'batch_assign_confidences' only support
scalar (agnostic) and vector (multiclass) targets.
- 'batch_assign_targets' assumes the input class tensor using the binary
one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
- 'batch_assign_confidences' assigns the targets in the similar way as
'batch_assign_targets' except that it gives different weights for implicit
and explicit classes. This allows user to control the negative gradients
pushed differently for implicit and explicit examples during the training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.to_float(positive_anchors)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background * (1 - tf.to_float(negative_mask)))
cls_weights_without_background = (
(1 - implicit_class_weight) * tf.to_float(explicit_example_mask)
+ implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
| apache-2.0 | -8,006,630,844,042,189,000 | 45.796238 | 80 | 0.675744 | false |
caihaibin/Blog | handlers/blog.py | 1 | 6084 | import datetime
import config
import PyRSS2Gen
from google.appengine.ext import webapp
from models import blog
import view
class IndexHandler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.filter('publish =', True)
query.order('-pub_date')
template_values = {'page_title': 'Home',
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/index.html', template_values)
class PostHandler(webapp.RequestHandler):
def get(self, year, month, day, slug):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for the given slug
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to check for slug uniqueness in the specified time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.filter('slug = ', slug)
post = query.get()
if post == None:
page = view.Page()
page.render_error(self, 404)
else:
template_values = {
'post': post,
}
page = view.Page()
page.render(self, 'templates/post.html', template_values)
class TagHandler(webapp.RequestHandler):
def get(self, tag):
query = blog.Post.all()
query.filter('publish =', True)
query.filter('tags = ', tag)
query.order('-pub_date')
template_values = {'page_title': 'Posts tagged "%s"' % (tag),
'page_description': 'Posts tagged "%s"' % (tag),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class YearHandler(webapp.RequestHandler):
def get(self, year):
year = int(year)
# Build the time span to check for posts
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year + 1, 1, 1)
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
template_values = {'page_title': 'Yearly Post Archive: %d' % (year),
'page_description': 'Yearly Post Archive: %d' % (year),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class MonthHandler(webapp.RequestHandler):
def get(self, year, month):
year = int(year)
month = int(month)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, 1)
end_year = year if month < 12 else year + 1
end_month = month + 1 if month < 12 else 1
end_date = datetime.datetime(end_year, end_month, 1)
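        # Illustrative note: month=12, year=2013 gives start 2013-12-01 and
        # end 2014-01-01, so December queries roll the year over correctly.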
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
month_text = start_date.strftime('%B %Y')
template_values = {'page_title': 'Monthly Post Archive: %s' % (month_text),
'page_description': 'Monthly Post Archive: %s' % (month_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class DayHandler(webapp.RequestHandler):
def get(self, year, month, day):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('publish =', True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
day_text = start_date.strftime('%x')
template_values = {'page_title': 'Daily Post Archive: %s' % (day_text),
'page_description': 'Daily Post Archive: %s' % (day_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/post_list.html', template_values)
class RSS2Handler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.filter('publish =', True)
query.order('-pub_date')
posts = query.fetch(10)
rss_items = []
for post in posts:
item = PyRSS2Gen.RSSItem(title=post.title,
link="%s%s" % (config.SETTINGS['url'], post.get_absolute_url()),
description=post.excerpt_html or post.body_html,
guid=PyRSS2Gen.Guid("%s%s" % (config.SETTINGS['url'], post.get_absolute_url())),
pubDate=post.pub_date
)
rss_items.append(item)
rss = PyRSS2Gen.RSS2(title=config.SETTINGS['title'],
link=config.SETTINGS['url'],
description=config.SETTINGS['description'],
lastBuildDate=datetime.datetime.now(),
items=rss_items
)
rss_xml = rss.to_xml()
self.response.headers['Content-Type'] = 'application/rss+xml'
self.response.out.write(rss_xml)
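# --- Illustrative wiring (not part of the original handlers module) ---
# These handlers only become reachable once they are mapped to URL patterns in a
# webapp.WSGIApplication. The route regexes below are assumptions made for this
# example (the real mapping lives elsewhere in the project); they show how the
# captured year/month/day/slug groups feed the positional arguments of get().
def _example_application():
    """Build a WSGIApplication wiring up the handlers above (illustrative only)."""
    return webapp.WSGIApplication([
        (r'/', IndexHandler),
        (r'/tag/([\w-]+)/?', TagHandler),
        (r'/(\d{4})/?', YearHandler),
        (r'/(\d{4})/(\d{2})/?', MonthHandler),
        (r'/(\d{4})/(\d{2})/(\d{2})/?', DayHandler),
        (r'/(\d{4})/(\d{2})/(\d{2})/([\w-]+)/?', PostHandler),
        (r'/rss2/?', RSS2Handler),
    ], debug=False)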
| mit | -8,702,056,349,484,685,000 | 34.372093 | 117 | 0.545529 | false |
zoho/books-python-wrappers | books/service/ZohoBooks.py | 1 | 7982 | #$Id$#
from books.api.ContactsApi import ContactsApi
from books.api.ContactPersonsApi import ContactPersonsApi
from books.api.EstimatesApi import EstimatesApi
from books.api.InvoicesApi import InvoicesApi
from books.api.RecurringInvoicesApi import RecurringInvoicesApi
from books.api.CreditNotesApi import CreditNotesApi
from books.api.CustomerPaymentsApi import CustomerPaymentsApi
from books.api.ExpensesApi import ExpensesApi
from books.api.RecurringExpensesApi import RecurringExpensesApi
from books.api.BillsApi import BillsApi
from books.api.VendorPaymentsApi import VendorPaymentsApi
from books.api.BankAccountsApi import BankAccountsApi
from books.api.BankTransactionsApi import BankTransactionsApi
from books.api.BankRulesApi import BankRulesApi
from books.api.ChartOfAccountsApi import ChartOfAccountsApi
from books.api.JournalsApi import JournalsApi
from books.api.BaseCurrencyAdjustmentApi import BaseCurrencyAdjustmentApi
from books.api.ProjectsApi import ProjectsApi
from books.api.SettingsApi import SettingsApi
from books.api.ItemsApi import ItemsApi
from books.api.OrganizationsApi import OrganizationsApi
from books.api.UsersApi import UsersApi
class ZohoBooks:
"""
    This class is used to create an object for the Zoho Books service and to provide instances for all the APIs.
"""
def __init__(self, authtoken, organization_id):
"""Initialize the parameters for Zoho books.
Args:
authtoken(str): User's Authtoken.
organization_id(str): User's Organization id.
"""
self.authtoken=authtoken
self.organization_id=organization_id
def get_contacts_api(self):
"""Get instance for contacts api.
Returns:
instance: Contacts api instance.
"""
contacts_api = ContactsApi(self.authtoken, self.organization_id)
return contacts_api
def get_contact_persons_api(self):
"""Get instance for contact persons api.
Returns:
instance: Contact persons api.
"""
contact_persons_api = ContactPersonsApi(self.authtoken,
self.organization_id)
return contact_persons_api
def get_estimates_api(self):
"""Get instance for estimates api.
Returns:
instance: Estimates api.
"""
estimates_api = EstimatesApi(self.authtoken, self.organization_id)
return estimates_api
def get_invoices_api(self):
"""Get instance for invoice api.
Returns:
instance: Invoice api.
"""
invoices_api = InvoicesApi(self.authtoken, self.organization_id)
return invoices_api
def get_recurring_invoices_api(self):
"""Get instance for recurring invoices api.
Returns:
instance: Recurring invoice api.
"""
recurring_invoices_api = RecurringInvoicesApi(self.authtoken, \
self.organization_id)
return recurring_invoices_api
def get_creditnotes_api(self):
"""Get instance for creditnotes api.
Returns:
instance: Creditnotes api.
"""
creditnotes_api = CreditNotesApi(self.authtoken, self.organization_id)
return creditnotes_api
def get_customer_payments_api(self):
"""Get instance for customer payments api.
Returns:
instance: Customer payments api.
"""
customer_payments_api = CustomerPaymentsApi(self.authtoken,
self.organization_id)
return customer_payments_api
def get_expenses_api(self):
"""Get instance for expenses api.
Returns:
instance: Expenses api.
"""
expenses_api = ExpensesApi(self.authtoken, self.organization_id)
return expenses_api
def get_recurring_expenses_api(self):
"""Get instance for recurring expenses api.
Returns:
instance: Recurring expenses api.
"""
recurring_expenses_api = RecurringExpensesApi(self.authtoken,
self.organization_id)
return recurring_expenses_api
def get_bills_api(self):
"""Get instance for bills api.
Returns:
instance: Bills api
"""
bills_api = BillsApi(self.authtoken, self.organization_id)
return bills_api
def get_vendor_payments_api(self):
"""Get instance for vendor payments api.
Returns:
instance: vendor payments api
"""
vendor_payments_api = VendorPaymentsApi(self.authtoken,
self.organization_id)
return vendor_payments_api
def get_bank_accounts_api(self):
"""Get instancce for bank accounts api.
Returns:
instance: Bank accounts api.
"""
bank_accounts_api = BankAccountsApi(self.authtoken,
self.organization_id)
return bank_accounts_api
def get_bank_transactions_api(self):
"""Get instance for bank transactions api.
Returns:
instance: Bank Transactions api.
"""
bank_transactions_api = BankTransactionsApi(self.authtoken,
self.organization_id)
return bank_transactions_api
def get_bank_rules_api(self):
"""Get instance for bank rules api.
Returns:
instance: Bank rules api.
"""
bank_rules_api = BankRulesApi(self.authtoken, self.organization_id)
return bank_rules_api
def get_chart_of_accounts_api(self):
"""Get instancce for chart of accounts api
Returns:
instance: Chart of accounts api.
"""
chart_of_accounts_api = ChartOfAccountsApi(self.authtoken,
self.organization_id)
return chart_of_accounts_api
def get_journals_api(self):
"""Get instance for journals api.
Returns:
instance: Journals api.
"""
journals_api = JournalsApi(self.authtoken, self.organization_id)
return journals_api
def get_base_currency_adjustment_api(self):
"""Get instance for base currency adjustment api
Returns:
instance: Base currency adjustments api.
"""
base_currency_adjustment_api = BaseCurrencyAdjustmentApi(\
self.authtoken, self.organization_id)
return base_currency_adjustment_api
def get_projects_api(self):
"""Get instance for projects api.
Returns:
instance: Projects api.
"""
projects_api = ProjectsApi(self.authtoken, self.organization_id)
return projects_api
def get_settings_api(self):
"""Get instance for settings api.
Returns:
instance: Settings api.
"""
settings_api = SettingsApi(self.authtoken, self.organization_id)
return settings_api
def get_items_api(self):
"""Get instance for items api.
Returns:
instance: Items api.
"""
items_api = ItemsApi(self.authtoken, self.organization_id)
return items_api
def get_users_api(self):
"""Get instance for users api.
Returns:
instance: Users api.
"""
users_api = UsersApi(self.authtoken, self.organization_id)
return users_api
def get_organizations_api(self):
"""Get instance for organizations api.
Returns:
instance: Organizations api.
"""
organizations_api = OrganizationsApi(self.authtoken, self.organization_id)
return organizations_api
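# --- Usage sketch (not part of the original wrapper) ---
# A minimal illustration of how this facade is meant to be used: create one
# ZohoBooks object with your credentials and ask it for per-resource API objects.
# The authtoken and organization id below are placeholders, not real values.
if __name__ == '__main__':
    books = ZohoBooks('your-auth-token', 'your-organization-id')
    contacts_api = books.get_contacts_api()
    invoices_api = books.get_invoices_api()
    print(contacts_api, invoices_api)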
| mit | -840,055,033,120,695,400 | 28.562963 | 98 | 0.609997 | false |
philtgun/horse | horse-welcome.py | 1 | 1475 | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
import requests
def greeting():
"Plays greeting audio via REST API"
r = requests.get("http://localhost:3000/playAudio/welcome.mp3");
def decCount(i):
    """Return i decremented by one when positive, otherwise unchanged (helper, currently unused)."""
    return i - 1 if i > 0 else i
T_POLL = 0.5 # sec
T_INSIDE_ACTIVE = 20 # sec
T_WELCOME_DELAY = 2 # sec
T_WELCOME_COOLDOWN = 60 # 1 min
T_DOOR_INACTIVE = 300 # 5 min
PIN_PIR = 15
PIN_DOOR = 14
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_PIR, GPIO.IN)
GPIO.setup(PIN_DOOR, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # door sensor uses the internal pull-up
isInBefore = False # nobody
doorOpenBefore = False
countIn = 0
countDoor = 0
print "Starting"
while True:
doorOpen = GPIO.input(PIN_DOOR)
isIn = GPIO.input(PIN_PIR)
print "[DEBUG] doorOpen: {}, isIn: {}, countIn: {}, countDoor {}".format(doorOpen, isIn, countIn, countDoor)
if doorOpen and not doorOpenBefore:
print "Somebody's opened the door"
countDoor = T_INSIDE_ACTIVE / T_POLL
if isIn and not isInBefore:
if countDoor > 0 and countIn == 0:
print "Hello!"
time.sleep(T_WELCOME_DELAY)
greeting()
time.sleep(T_WELCOME_COOLDOWN)
            # countIn is already 0 in this branch (see the condition above), so no counter reset is needed
else:
print "Somebody's leaving! No hello for {} min".format(T_DOOR_INACTIVE / 60)
countIn = T_DOOR_INACTIVE / T_POLL
time.sleep(T_POLL)
isInBefore = isIn
doorOpenBefore = doorOpen
countDoor -= 1 if countDoor > 0 else 0
countIn -= 1 if countIn > 0 else 0
| mit | 4,151,467,977,349,972,500 | 21.348485 | 110 | 0.661695 | false |
jennywoites/MUSSA | MUSSA_Flask/app/API_Rest/GeneradorPlanCarreras/GeneradorPLE/OptimizadorCodigoPulp.py | 1 | 4966 | from app.API_Rest.GeneradorPlanCarreras.Constantes import *
MENOR_IGUAL = 0
MAYOR_IGUAL = 1
def obtener_variables_candidatas(parametros):
variables_candidatas = {}
with open(parametros.nombre_archivo_pulp, 'r') as arch:
for linea in arch:
linea = linea.rstrip('\n')
ecuacion = linea.split("prob += (")
            if len(ecuacion) < 2:  # not the line we are looking for
continue
menor_igual = ecuacion[1].split(" <= 0")
mayor_igual = ecuacion[1].split(" >= 0")
            variable_actual = menor_igual[0]  # mayor_igual[0] would give the same expression
acumulados = variables_candidatas.get(variable_actual, [0,0])
            acumulados[MENOR_IGUAL] += 1 if len(menor_igual) == 2 else 0
            acumulados[MAYOR_IGUAL] += 1 if len(mayor_igual) == 2 else 0
variables_candidatas[variable_actual] = acumulados
return variables_candidatas
def obtener_variables_a_eliminar(parametros):
variables_candidatas = obtener_variables_candidatas(parametros)
variables_a_eliminar = []
for candidata in variables_candidatas:
multiples_variables = candidata.split()
        if len(multiples_variables) > 1:  # only useful when it is a single variable
continue
acumulados = variables_candidatas[candidata]
if acumulados[MENOR_IGUAL] == acumulados[MAYOR_IGUAL] == 1:
variables_a_eliminar.append(candidata)
return variables_a_eliminar
def reemplazar_todas_las_apariciones(texto, valor_a_reeemplazar, nuevo_valor):
anterior = ""
while (anterior != texto):
anterior = texto
texto = texto.replace(valor_a_reeemplazar, nuevo_valor)
return texto
def define_variable_mayor_a_cero(linea):
inicio = "prob += ("
final = " >= 0)"
linea_aux = linea[len(inicio):]
if final in linea_aux:
linea_aux = linea_aux.replace(final, "")
variable_mayor_a_cero = linea_aux.split()
if len(variable_mayor_a_cero) == 1:
return True
return False
def define_variable_menor_a_infinito(linea):
inicio = "prob += ("
final = " <= 0 + (1 - 0) * {})".format(INFINITO)
linea_aux = linea[len(inicio):]
if final in linea_aux:
linea_aux = linea_aux.replace(final, "")
variable_menor_a_infinito = linea_aux.split()
if len(variable_menor_a_infinito) == 1:
return True
return False
def reemplazar_productos_franjas_por_cero(parametros, linea):
for franja in range(parametros.franja_minima, parametros.franja_maxima +1):
producto = " {} * 0".format(franja)
linea = reemplazar_todas_las_apariciones(linea, producto, " 0")
return linea
def limpiar_linea(parametros, linea, variables_a_eliminar):
for variable in variables_a_eliminar:
if variable not in linea:
continue
if "LpVariable" in linea:
return "" #La linea no se escribe, no es necesario revisar las demas variables
if "arch.write" in linea:
return """ arch.write("{};0" + '\\n')\n""".format(variable)
linea = reemplazar_todas_las_apariciones(linea, variable, "0")
linea = reemplazar_apariciones_suma_cero(linea)
linea = reemplazar_productos_franjas_por_cero(parametros, linea)
linea = reemplazar_apariciones_suma_cero(linea)
return linea
def reemplazar_apariciones_suma_cero(linea):
linea = reemplazar_todas_las_apariciones(linea, "+ 0 ", "")
linea = reemplazar_todas_las_apariciones(linea, "- 0 ", "")
linea = reemplazar_todas_las_apariciones(linea, " 0 + 0 ", "0")
linea = reemplazar_todas_las_apariciones(linea, "(0 + 0)", "0")
linea = reemplazar_todas_las_apariciones(linea, " 0 + 0)", "0)")
return linea
def limpiar_archivo(parametros, variables_a_eliminar, arch, arch_optimizado):
for linea in arch:
linea = limpiar_linea(parametros, linea, variables_a_eliminar)
if not linea:
continue
if linea == "prob += (0 <= 0)\n" or linea == "prob += (0 >= 0)\n":
            continue  # a tautology, no need to write it out
if define_variable_mayor_a_cero(linea):
            continue  # every variable in this problem is already >= 0
if define_variable_menor_a_infinito(linea):
            continue  # every variable is below infinity, so this constraint can be dropped
arch_optimizado.write(linea)
def guardar_archivo_optimizado(parametros, variables_a_eliminar):
with open(parametros.nombre_archivo_pulp, 'r') as arch:
with open(parametros.nombre_archivo_pulp_optimizado, 'w') as arch_optimizado:
limpiar_archivo(parametros, variables_a_eliminar, arch, arch_optimizado)
def optimizar_codigo_pulp(parametros):
variables_a_eliminar = obtener_variables_a_eliminar(parametros)
guardar_archivo_optimizado(parametros, variables_a_eliminar)
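# --- Illustrative driver (not part of the original module) ---
# The optimizer rewrites the text of a generated PuLP script: a variable that only
# shows up in the pair of bounding constraints ">= 0" and "<= 0 ..." contributes
# nothing, so it is replaced by 0 everywhere and the now trivial constraints are
# dropped. The parameters object below is a stand-in; the attribute names match
# those used above, but the file names are made up.
if __name__ == '__main__':
    class _ParametrosDeEjemplo(object):
        nombre_archivo_pulp = 'plan_generado.py'
        nombre_archivo_pulp_optimizado = 'plan_generado_optimizado.py'
        franja_minima = 1
        franja_maxima = 10
    optimizar_codigo_pulp(_ParametrosDeEjemplo())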
| gpl-3.0 | -6,937,123,647,893,344,000 | 32.106667 | 90 | 0.638743 | false |
platformio/platformio-core | platformio/debug/config/blackmagic.py | 1 | 1286 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.debug.config.base import DebugConfigBase
class BlackmagicDebugConfig(DebugConfigBase):
GDB_INIT_SCRIPT = """
define pio_reset_halt_target
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
end
define pio_reset_run_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
monitor swdp_scan
attach 1
set mem inaccessible-by-default off
$LOAD_CMDS
$INIT_BREAK
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
"""
| apache-2.0 | 6,271,656,748,231,535,000 | 25.244898 | 74 | 0.734059 | false |
ntt-sic/heat | heat/engine/resources/route_table.py | 1 | 6007 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import clients
from heat.openstack.common import log as logging
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.neutron import neutron
from heat.engine.resources.vpc import VPC
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
logger = logging.getLogger(__name__)
class RouteTable(resource.Resource):
PROPERTIES = (
VPC_ID, TAGS,
) = (
'VpcId', 'Tags',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
properties_schema = {
VPC_ID: properties.Schema(
properties.Schema.STRING,
_('VPC ID for where the route table is created.'),
required=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
_('List of tags to be attached to this resource.'),
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
implemented=False,
)
),
}
def handle_create(self):
client = self.neutron()
props = {'name': self.physical_resource_name()}
router = client.create_router({'router': props})['router']
self.resource_id_set(router['id'])
def check_create_complete(self, *args):
client = self.neutron()
attributes = client.show_router(
self.resource_id)['router']
if not neutron.NeutronResource.is_built(attributes):
return False
network_id = self.properties.get(self.VPC_ID)
default_router = VPC.router_for_vpc(client, network_id)
if default_router and default_router.get('external_gateway_info'):
# the default router for the VPC is connected
# to the external router, so do it for this too.
external_network_id = default_router[
'external_gateway_info']['network_id']
client.add_gateway_router(self.resource_id, {
'network_id': external_network_id})
return True
def handle_delete(self):
client = self.neutron()
router_id = self.resource_id
try:
client.delete_router(router_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
# just in case this router has been added to a gateway, remove it
try:
client.remove_gateway_router(router_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
class SubnetRouteTableAssociation(resource.Resource):
PROPERTIES = (
ROUTE_TABLE_ID, SUBNET_ID,
) = (
'RouteTableId', 'SubnetId',
)
properties_schema = {
ROUTE_TABLE_ID: properties.Schema(
properties.Schema.STRING,
_('Route table ID.'),
required=True
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
_('Subnet ID.'),
required=True
),
}
def handle_create(self):
client = self.neutron()
subnet_id = self.properties.get(self.SUBNET_ID)
router_id = self.properties.get(self.ROUTE_TABLE_ID)
#remove the default router association for this subnet.
try:
previous_router = self._router_for_subnet(subnet_id)
if previous_router:
client.remove_interface_router(
previous_router['id'],
{'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
client.add_interface_router(
router_id, {'subnet_id': subnet_id})
def _router_for_subnet(self, subnet_id):
client = self.neutron()
subnet = client.show_subnet(
subnet_id)['subnet']
network_id = subnet['network_id']
return VPC.router_for_vpc(client, network_id)
def handle_delete(self):
client = self.neutron()
subnet_id = self.properties.get(self.SUBNET_ID)
router_id = self.properties.get(self.ROUTE_TABLE_ID)
try:
client.remove_interface_router(router_id, {
'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
# add back the default router
try:
default_router = self._router_for_subnet(subnet_id)
if default_router:
client.add_interface_router(
default_router['id'], {'subnet_id': subnet_id})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'AWS::EC2::RouteTable': RouteTable,
'AWS::EC2::SubnetRouteTableAssociation': SubnetRouteTableAssociation,
}
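# --- Illustrative template fragment (not part of the original plug-in) ---
# The two resource types registered above are addressed from CloudFormation-style
# templates. The snippet below is only an example; the resource names and the
# referenced VPC/subnet are made up.
_EXAMPLE_TEMPLATE_RESOURCES = {
    'MyRouteTable': {
        'Type': 'AWS::EC2::RouteTable',
        'Properties': {'VpcId': {'Ref': 'MyVPC'}},
    },
    'MyAssociation': {
        'Type': 'AWS::EC2::SubnetRouteTableAssociation',
        'Properties': {
            'RouteTableId': {'Ref': 'MyRouteTable'},
            'SubnetId': {'Ref': 'MySubnet'},
        },
    },
}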
| apache-2.0 | -2,251,101,202,301,618,700 | 30.615789 | 78 | 0.575828 | false |
otsaloma/gaupol | gaupol/agents/open.py | 1 | 19435 | # -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Opening subtitle files and creating new projects."""
import aeidon
import gaupol
import os
from aeidon.i18n import _
from gi.repository import Gtk
class OpenAgent(aeidon.Delegate):
"""Opening subtitle files and creating new projects."""
@aeidon.deco.export
def add_page(self, page):
"""Add `page` to the application."""
self.pages.append(page)
page.connect("close-request", self._on_page_close_request)
page.project.connect("action-done", self._on_project_action_done)
page.project.connect("action-redone", self._on_project_action_redone)
page.project.connect("action-undone", self._on_project_action_undone)
callback = self._on_tab_widget_button_press_event
page.tab_widget.connect("button-press-event", callback, page)
self.connect_view_signals(page.view)
page.project.clipboard.set_texts(self.clipboard.get_texts())
scroller = Gtk.ScrolledWindow()
policy = Gtk.PolicyType.AUTOMATIC
scroller.set_policy(policy, policy)
scroller.add(page.view)
self.notebook.append_page(scroller, page.tab_widget)
self.notebook.set_tab_reorderable(scroller, True)
self.notebook.child_set_property(scroller, "tab-expand", True)
self.notebook.child_set_property(scroller, "tab-fill", True)
self.notebook.show_all()
self.set_current_page(page)
self.emit("page-added", page)
@aeidon.deco.export
def add_to_recent_files(self, path, format, doc):
"""Add `path` to recent files managed by the recent manager."""
# XXX: The group field is not available for Python,
# we cannot differentiate between main and translation files.
# https://bugzilla.gnome.org/show_bug.cgi?id=695970
uri = aeidon.util.path_to_uri(path)
recent = Gtk.RecentData()
recent.mime_type = format.mime_type
recent.app_name = "gaupol"
recent.app_exec = "gaupol %F"
self.recent_manager.add_full(uri, recent)
@aeidon.deco.export
def append_file(self, path, encoding=None):
"""Append subtitles from file at `path` to the current project."""
encodings = self._get_encodings(encoding)
doc = aeidon.documents.MAIN
temp = self._open_file(path, encodings, doc, check_open=False)
gaupol.util.set_cursor_busy(self.window)
current = self.get_current_page()
offset = current.project.subtitles[-1].end
temp.project.shift_positions(None, offset)
rows = self._append_subtitles(current, temp.project.subtitles)
amount = len(rows)
current.view.set_focus(rows[0], None)
current.view.select_rows(rows)
current.view.scroll_to_row(rows[0])
basename = temp.get_main_basename()
message = _('Appended {amount:d} subtitles from "{basename}"')
self.flash_message(message.format(**locals()))
gaupol.util.set_cursor_normal(self.window)
def _append_subtitles(self, page, subtitles):
"""Append `subtitles` to `page` and return new indices."""
n = len(page.project.subtitles)
indices = list(range(n, n + len(subtitles)))
page.project.block("action-done")
page.project.insert_subtitles(indices, subtitles)
page.project.set_action_description(
aeidon.registers.DO, _("Appending file"))
page.project.unblock("action-done")
return tuple(indices)
def _check_file_exists(self, path):
"""Raise :exc:`gaupol.Default` if no file at `path`."""
gaupol.util.raise_default(not os.path.isfile(path))
def _check_file_not_open(self, path):
"""Raise :exc:`gaupol.Default` if file at `path` already open."""
for page in self.pages:
files = [page.project.main_file, page.project.tran_file]
paths = [x.path for x in files if x]
if not path in paths: continue
self.set_current_page(page)
message = _('File "{}" is already open')
self.flash_message(message.format(os.path.basename(path)))
raise gaupol.Default
def _check_file_size(self, path):
"""Raise :exc:`gaupol.Default` if size of file at `path` too large."""
size_mb = os.stat(path).st_size / 1048576
if size_mb <= 1: return
basename = os.path.basename(path)
self._show_size_warning_dialog(basename, size_mb)
def _check_sort_count(self, path, sort_count):
"""Raise :exc:`gaupol.Default` if `sort_count` too large."""
if sort_count <= 0: return
basename = os.path.basename(path)
self._show_sort_warning_dialog(basename, sort_count)
@aeidon.deco.export
def connect_view_signals(self, view):
"""Connect to signals emitted by `view`."""
view.connect_selection_changed(self._on_view_selection_changed)
view.connect_after("move-cursor", self._on_view_move_cursor)
view.connect("button-press-event", self._on_view_button_press_event)
for column in view.get_columns():
renderer = column.get_cells()[0]
callback = self._on_view_renderer_edited
renderer.connect("edited", callback, column)
callback = self._on_view_renderer_editing_started
renderer.connect("editing-started", callback, column)
callback = self._on_view_renderer_editing_canceled
renderer.connect("editing-canceled", callback, column)
button = column.get_widget().get_ancestor(Gtk.Button)
callback = self._on_view_header_button_press_event
button.connect("button-press-event", callback)
def _get_encodings(self, first=None):
"""Return a sequence of encodings to try when opening files."""
encodings = [first]
if gaupol.conf.encoding.try_locale:
encoding = aeidon.encodings.get_locale_code()
encodings.append(encoding)
encodings += gaupol.conf.encoding.fallback
try_auto = gaupol.conf.encoding.try_auto
if try_auto and aeidon.util.chardet_available():
encodings.append("auto")
encodings = list(filter(None, encodings))
encodings = encodings or ["utf_8"]
return tuple(aeidon.util.get_unique(encodings))
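    # A worked example of the ordering produced above (values illustrative only):
    # with first="cp1252", a locale code of "utf_8" and a fallback list of
    # ["utf_8", "latin_1"], the method yields ("cp1252", "utf_8", "latin_1"),
    # with "auto" appended when automatic detection via chardet is enabled.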
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_append_file_activate(self, *args):
"""Append subtitles from file to the current project."""
gaupol.util.set_cursor_busy(self.window)
dialog = gaupol.AppendDialog(self.window)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
paths = dialog.get_filenames()
encoding = dialog.get_encoding()
dialog.destroy()
if response != Gtk.ResponseType.OK: return
if not paths: return
gaupol.util.iterate_main()
self.append_file(paths[0], encoding)
@aeidon.deco.export
def _on_new_project_activate(self, *args):
"""Create a new project."""
if gaupol.fields.TRAN_TEXT in gaupol.conf.editor.visible_fields:
gaupol.conf.editor.visible_fields.remove(gaupol.fields.TRAN_TEXT)
page = gaupol.Page(next(self.counter))
page.project.insert_subtitles((0,), register=None)
self.add_page(page)
@aeidon.deco.export
def _on_notebook_drag_data_received(self, notebook, context, x, y,
selection_data, info, time):
"""Open main files from dragged URIs."""
uris = selection_data.get_uris()
paths = list(map(aeidon.util.uri_to_path, uris))
videos = list(filter(aeidon.util.is_video_file, paths))
subtitles = list(set(paths) - set(videos))
self.open_main(subtitles)
if self.get_current_page() and len(videos) == 1:
self.load_video(videos[0])
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_open_main_files_activate(self, *args):
"""Open main files."""
doc = aeidon.documents.MAIN
paths, encoding = self._select_files(_("Open"), doc)
self.open_main(paths, encoding)
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def _on_open_translation_file_activate(self, *args):
"""Open a translation file."""
page = self.get_current_page()
if page.project.tran_changed:
self._show_translation_warning_dialog(page)
doc = aeidon.documents.TRAN
paths, encoding = self._select_files(_("Open Translation"), doc)
self.open_translation(paths[0], encoding)
@aeidon.deco.export
def _on_select_video_file_activate(self, *args):
"""Select a video file."""
gaupol.util.set_cursor_busy(self.window)
page = self.get_current_page()
path = page.project.video_path
title = _("Select Video")
label = _("_Select")
dialog = gaupol.VideoDialog(self.window, title, label)
if page.project.main_file is not None:
directory = os.path.dirname(page.project.main_file.path)
dialog.set_current_folder(directory)
if page.project.video_path is not None:
dialog.set_filename(page.project.video_path)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
path = dialog.get_filename()
dialog.destroy()
if response != Gtk.ResponseType.OK: return
page.project.video_path = path
self.update_gui()
@aeidon.deco.export
def _on_split_project_activate(self, *args):
"""Split the current project in two."""
gaupol.util.flash_dialog(gaupol.SplitDialog(self.window, self))
def _open_file(self, path, encodings, doc, check_open=True):
"""Open file at `path` and return corresponding page if successful."""
self._check_file_exists(path)
if check_open:
self._check_file_not_open(path)
self._check_file_size(path)
basename = os.path.basename(path)
page = (gaupol.Page() if doc == aeidon.documents.MAIN
else self.get_current_page())
for encoding in encodings:
with aeidon.util.silent(UnicodeError):
n = self._try_open_file(page, doc, path, encoding)
self._check_sort_count(path, n)
return page
# Report if all codecs failed to decode file.
self._show_encoding_error_dialog(basename)
raise gaupol.Default
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def open_main(self, path, encoding=None):
"""Open file at `path` as a main file."""
if gaupol.fields.TRAN_TEXT in gaupol.conf.editor.visible_fields:
gaupol.conf.editor.visible_fields.remove(gaupol.fields.TRAN_TEXT)
encodings = self._get_encodings(encoding)
gaupol.util.set_cursor_busy(self.window)
for path in aeidon.util.flatten([path]):
try:
# Skip files that are already open,
# but show a status message when that happens.
self._check_file_not_open(path)
except gaupol.Default:
continue
try:
page = self._open_file(path, encodings, aeidon.documents.MAIN)
except gaupol.Default:
gaupol.util.set_cursor_normal(self.window)
raise # gaupol.Default
self.add_page(page)
format = page.project.main_file.format
self.add_to_recent_files(path, format, aeidon.documents.MAIN)
# Refresh view to get row heights etc. correct.
page.view.set_focus(0, page.view.columns.MAIN_TEXT)
gaupol.util.set_cursor_normal(self.window)
self.update_gui()
@aeidon.deco.export
@aeidon.deco.silent(gaupol.Default)
def open_translation(self, path, encoding=None, align_method=None):
"""Open file at `path` as a translation file."""
if align_method is not None:
gaupol.conf.file.align_method = align_method
encodings = self._get_encodings(encoding)
page = self._open_file(path, encodings, aeidon.documents.TRAN)
gaupol.util.set_cursor_busy(self.window)
col = page.view.columns.TRAN_TEXT
if not page.view.get_column(col).get_visible():
self.get_column_action(gaupol.fields.TRAN_TEXT).activate()
format = page.project.tran_file.format
self.add_to_recent_files(path, format, aeidon.documents.TRAN)
gaupol.util.set_cursor_normal(self.window)
def _select_files(self, title, doc):
"""Show a :class:`gaupol.OpenDialog` to select files."""
gaupol.util.set_cursor_busy(self.window)
dialog = gaupol.OpenDialog(self.window, title, doc)
page = self.get_current_page()
if page is not None and page.project.main_file is not None:
directory = os.path.dirname(page.project.main_file.path)
dialog.set_current_folder(directory)
gaupol.util.set_cursor_normal(self.window)
response = gaupol.util.run_dialog(dialog)
paths = dialog.get_filenames()
encoding = dialog.get_encoding()
dialog.destroy()
gaupol.util.raise_default(response != Gtk.ResponseType.OK)
gaupol.util.iterate_main()
return paths, encoding
def _show_encoding_error_dialog(self, basename):
"""Show an error dialog after failing to decode file."""
title = _('Failed to decode file "{}" with all attempted codecs').format(basename)
message = _("Please try to open the file with a different character encoding.")
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_format_error_dialog(self, basename):
"""Show an error dialog after failing to recognize file format."""
title = _('Failed to recognize format of file "{}"').format(basename)
message = _("Please check that the file you are trying to open is a subtitle file of a format supported by Gaupol.")
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_io_error_dialog(self, basename, message):
"""Show an error dialog after failing to read file."""
title = _('Failed to open file "{}"').format(basename)
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_parse_error_dialog(self, basename, format):
"""Show an error dialog after failing to parse file."""
title = _('Failed to parse file "{}"').format(basename)
message = _("Please check that the file you are trying to open is a valid {} file.").format(format.label)
dialog = gaupol.ErrorDialog(self.window, title, message)
dialog.add_button(_("_OK"), Gtk.ResponseType.OK)
dialog.set_default_response(Gtk.ResponseType.OK)
gaupol.util.flash_dialog(dialog)
def _show_size_warning_dialog(self, basename, size):
"""Show a warning dialog when trying to open a large file."""
title = _('Open abnormally large file "{}"?').format(basename)
message = _("Size of the file is {:.1f} MB, which is abnormally large for a text-based subtitle file. Please, check that you are not trying to open a binary file.").format(size)
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.NO)
dialog.add_button(_("_Open"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.NO)
response = gaupol.util.flash_dialog(dialog)
gaupol.util.raise_default(response != Gtk.ResponseType.YES)
def _show_sort_warning_dialog(self, basename, count):
"""Show a warning dialog when subtitles have been sorted."""
title = _('Open unsorted file "{}"?').format(basename)
message = _("The order of {:d} subtitles needs to be changed. If {:d} sounds like a lot, the file may be erroneously composed.")
message = message.format(count, count)
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.NO)
dialog.add_button(_("_Open"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.YES)
response = gaupol.util.flash_dialog(dialog)
gaupol.util.raise_default(response != Gtk.ResponseType.YES)
def _show_translation_warning_dialog(self, page):
"""Show a warning dialog if opening a new translation file."""
title = _('Save changes to translation document "{}" before opening a new one?').format(page.get_translation_basename())
message = _("If you don't save, changes will be permanently lost.")
dialog = gaupol.WarningDialog(self.window, title, message)
dialog.add_button(_("Open _Without Saving"), Gtk.ResponseType.NO)
dialog.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
dialog.add_button(_("_Save"), Gtk.ResponseType.YES)
dialog.set_default_response(Gtk.ResponseType.YES)
response = gaupol.util.flash_dialog(dialog)
if response == Gtk.ResponseType.YES:
return self.save_translation(page)
gaupol.util.raise_default(response != Gtk.ResponseType.NO)
def _try_open_file(self, page, doc, path, encoding, **kwargs):
"""Try to open file at `path` and return subtitle sort count."""
if encoding == "auto":
encoding = aeidon.encodings.detect(path)
if encoding is None: raise UnicodeError
kwargs["align_method"] = gaupol.conf.file.align_method
basename = os.path.basename(path)
try:
return page.project.open(doc, path, encoding, **kwargs)
except aeidon.FormatError:
self._show_format_error_dialog(basename)
except IOError as error:
self._show_io_error_dialog(basename, str(error))
except aeidon.ParseError:
bom_encoding = aeidon.encodings.detect_bom(path)
encoding = bom_encoding or encoding
with aeidon.util.silent(Exception):
format = aeidon.util.detect_format(path, encoding)
self._show_parse_error_dialog(basename, format)
raise gaupol.Default
| gpl-3.0 | 3,600,033,596,375,179,000 | 45.944444 | 185 | 0.639516 | false |
ULHPC/modules | easybuild/easybuild-framework/easybuild/tools/filetools.py | 1 | 38368 | # #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Set of file tools.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import glob
import os
import re
import shutil
import stat
import time
import urllib2
import zlib
from vsc.utils import fancylogger
import easybuild.tools.environment as env
from easybuild.tools.build_log import EasyBuildError, print_msg # import build_log must stay, to use of EasyBuildLog
from easybuild.tools.config import build_option
from easybuild.tools import run
_log = fancylogger.getLogger('filetools', fname=False)
# easyblock class prefix
EASYBLOCK_CLASS_PREFIX = 'EB_'
# character map for encoding strings
STRING_ENCODING_CHARMAP = {
r' ': "_space_",
r'!': "_exclamation_",
r'"': "_quotation_",
r'#': "_hash_",
r'$': "_dollar_",
r'%': "_percent_",
r'&': "_ampersand_",
r'(': "_leftparen_",
r')': "_rightparen_",
r'*': "_asterisk_",
r'+': "_plus_",
r',': "_comma_",
r'-': "_minus_",
r'.': "_period_",
r'/': "_slash_",
r':': "_colon_",
r';': "_semicolon_",
r'<': "_lessthan_",
r'=': "_equals_",
r'>': "_greaterthan_",
r'?': "_question_",
r'@': "_atsign_",
r'[': "_leftbracket_",
r'\'': "_apostrophe_",
r'\\': "_backslash_",
r']': "_rightbracket_",
r'^': "_circumflex_",
r'_': "_underscore_",
r'`': "_backquote_",
r'{': "_leftcurly_",
r'|': "_verticalbar_",
r'}': "_rightcurly_",
r'~': "_tilde_",
}
try:
# preferred over md5/sha modules, but only available in Python 2.5 and more recent
import hashlib
md5_class = hashlib.md5
sha1_class = hashlib.sha1
except ImportError:
import md5, sha
md5_class = md5.md5
sha1_class = sha.sha
# default checksum for source and patch files
DEFAULT_CHECKSUM = 'md5'
# map of checksum types to checksum functions
CHECKSUM_FUNCTIONS = {
'md5': lambda p: calc_block_checksum(p, md5_class()),
'sha1': lambda p: calc_block_checksum(p, sha1_class()),
'adler32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.adler32)),
'crc32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.crc32)),
'size': lambda p: os.path.getsize(p),
}
class ZlibChecksum(object):
"""
wrapper class for adler32 and crc32 checksums to
match the interface of the hashlib module
"""
def __init__(self, algorithm):
self.algorithm = algorithm
self.checksum = algorithm(r'') # use the same starting point as the module
self.blocksize = 64 # The same as md5/sha1
def update(self, data):
"""Calculates a new checksum using the old one and the new data"""
self.checksum = self.algorithm(data, self.checksum)
def hexdigest(self):
"""Return hex string of the checksum"""
        return '0x%x' % (self.checksum & 0xffffffff)
def read_file(path, log_error=True):
"""Read contents of file at given path, in a robust way."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
f = open(path, 'r')
txt = f.read()
f.close()
return txt
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
if log_error:
raise EasyBuildError("Failed to read %s: %s", path, err)
else:
return None
def write_file(path, txt, append=False):
"""Write given contents to file at given path (overwrites current file contents!)."""
f = None
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
mkdir(os.path.dirname(path), parents=True)
if append:
f = open(path, 'a')
else:
f = open(path, 'w')
f.write(txt)
f.close()
except IOError, err:
# make sure file handle is always closed
if f is not None:
f.close()
raise EasyBuildError("Failed to write to %s: %s", path, err)
def remove_file(path):
"""Remove file at specified path."""
try:
if os.path.exists(path):
os.remove(path)
except OSError, err:
raise EasyBuildError("Failed to remove %s: %s", path, err)
def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False):
"""
Given filename fn, try to extract in directory dest
- returns the directory name in case of success
"""
if not os.path.isfile(fn):
raise EasyBuildError("Can't extract file %s: no such file", fn)
mkdir(dest, parents=True)
# use absolute pathnames from now on
abs_dest = os.path.abspath(dest)
# change working directory
try:
_log.debug("Unpacking %s in directory %s.", fn, abs_dest)
os.chdir(abs_dest)
except OSError, err:
raise EasyBuildError("Can't change to directory %s: %s", abs_dest, err)
if not cmd:
cmd = extract_cmd(fn, overwrite=overwrite)
else:
# complete command template with filename
cmd = cmd % fn
if not cmd:
raise EasyBuildError("Can't extract file %s with unknown filetype", fn)
if extra_options:
cmd = "%s %s" % (cmd, extra_options)
run.run_cmd(cmd, simple=True)
return find_base_dir()
def which(cmd):
"""Return (first) path in $PATH for specified command, or None if command is not found."""
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
cmd_path = os.path.join(path, cmd)
# only accept path is command is there, and both readable and executable
if os.access(cmd_path, os.R_OK | os.X_OK):
_log.info("Command %s found at %s" % (cmd, cmd_path))
return cmd_path
_log.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)" % (cmd, paths))
return None
def det_common_path_prefix(paths):
"""Determine common path prefix for a given list of paths."""
if not isinstance(paths, list):
raise EasyBuildError("det_common_path_prefix: argument must be of type list (got %s: %s)", type(paths), paths)
elif not paths:
return None
# initial guess for common prefix
prefix = paths[0]
found_common = False
while not found_common and prefix != os.path.dirname(prefix):
prefix = os.path.dirname(prefix)
found_common = all([p.startswith(prefix) for p in paths])
if found_common:
# prefix may be empty string for relative paths with a non-common prefix
return prefix.rstrip(os.path.sep) or None
else:
return None
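# Worked examples for det_common_path_prefix (paths are hypothetical):
# det_common_path_prefix(['/tmp/foo/bar', '/tmp/foo/baz']) -> '/tmp/foo'
# det_common_path_prefix(['/tmp/foo', '/usr/lib']) -> None (nothing shared beyond '/')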
def download_file(filename, url, path):
"""Download a file from the given URL, to the specified path."""
_log.debug("Trying to download %s from %s to %s", filename, url, path)
timeout = build_option('download_timeout')
if timeout is None:
# default to 10sec timeout if none was specified
# default system timeout (used is nothing is specified) may be infinite (?)
timeout = 10
_log.debug("Using timeout of %s seconds for initiating download" % timeout)
# make sure directory exists
basedir = os.path.dirname(path)
mkdir(basedir, parents=True)
# try downloading, three times max.
downloaded = False
max_attempts = 3
attempt_cnt = 0
while not downloaded and attempt_cnt < max_attempts:
try:
# urllib2 does the right thing for http proxy setups, urllib does not!
url_fd = urllib2.urlopen(url, timeout=timeout)
_log.debug('response code for given url %s: %s' % (url, url_fd.getcode()))
write_file(path, url_fd.read())
_log.info("Downloaded file %s from url %s to %s" % (filename, url, path))
downloaded = True
url_fd.close()
except urllib2.HTTPError as err:
if 400 <= err.code <= 499:
_log.warning("URL %s was not found (HTTP response code %s), not trying again" % (url, err.code))
break
else:
_log.warning("HTTPError occured while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except IOError as err:
_log.warning("IOError occurred while trying to download %s to %s: %s" % (url, path, err))
attempt_cnt += 1
except Exception, err:
raise EasyBuildError("Unexpected error occurred when trying to download %s to %s: %s", url, path, err)
if not downloaded and attempt_cnt < max_attempts:
_log.info("Attempt %d of downloading %s to %s failed, trying again..." % (attempt_cnt, url, path))
if downloaded:
_log.info("Successful download of file %s from url %s to path %s" % (filename, url, path))
return path
else:
_log.warning("Download of %s to %s failed, done trying" % (url, path))
return None
def find_easyconfigs(path, ignore_dirs=None):
"""
Find .eb easyconfig files in path
"""
if os.path.isfile(path):
return [path]
if ignore_dirs is None:
ignore_dirs = []
# walk through the start directory, retain all files that end in .eb
files = []
path = os.path.abspath(path)
for dirpath, dirnames, filenames in os.walk(path, topdown=True):
for f in filenames:
if not f.endswith('.eb') or f == 'TEMPLATE.eb':
continue
spec = os.path.join(dirpath, f)
_log.debug("Found easyconfig %s" % spec)
files.append(spec)
# ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
return files
def search_file(paths, query, short=False, ignore_dirs=None, silent=False):
"""
Search for a particular file (only prints)
"""
if ignore_dirs is None:
ignore_dirs = ['.git', '.svn']
if not isinstance(ignore_dirs, list):
raise EasyBuildError("search_file: ignore_dirs (%s) should be of type list, not %s",
ignore_dirs, type(ignore_dirs))
# compile regex, case-insensitive
query = re.compile(query, re.I)
var_lines = []
hit_lines = []
var_index = 1
var = None
for path in paths:
hits = []
hit_in_path = False
print_msg("Searching (case-insensitive) for '%s' in %s " % (query.pattern, path), log=_log, silent=silent)
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
for filename in filenames:
if query.search(filename):
if not hit_in_path:
var = "CFGS%d" % var_index
var_index += 1
hit_in_path = True
hits.append(os.path.join(dirpath, filename))
# do not consider (certain) hidden directories
# note: we still need to consider e.g., .local !
# replace list elements using [:], so os.walk doesn't process deleted directories
# see http://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders
dirnames[:] = [d for d in dirnames if not d in ignore_dirs]
hits = sorted(hits)
if hits:
common_prefix = det_common_path_prefix(hits)
if short and common_prefix is not None and len(common_prefix) > len(var) * 2:
var_lines.append("%s=%s" % (var, common_prefix))
hit_lines.extend([" * %s" % os.path.join('$%s' % var, fn[len(common_prefix) + 1:]) for fn in hits])
else:
hit_lines.extend([" * %s" % fn for fn in hits])
for line in var_lines + hit_lines:
print_msg(line, log=_log, silent=silent, prefix=False)
def compute_checksum(path, checksum_type=DEFAULT_CHECKSUM):
"""
Compute checksum of specified file.
@param path: Path of file to compute checksum for
@param checksum_type: Type of checksum ('adler32', 'crc32', 'md5' (default), 'sha1', 'size')
"""
if not checksum_type in CHECKSUM_FUNCTIONS:
raise EasyBuildError("Unknown checksum type (%s), supported types are: %s",
checksum_type, CHECKSUM_FUNCTIONS.keys())
try:
checksum = CHECKSUM_FUNCTIONS[checksum_type](path)
except IOError, err:
raise EasyBuildError("Failed to read %s: %s", path, err)
except MemoryError, err:
_log.warning("A memory error occured when computing the checksum for %s: %s" % (path, err))
checksum = 'dummy_checksum_due_to_memory_error'
return checksum
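# Example use of compute_checksum (path and digest are illustrative only):
# compute_checksum('/tmp/foo.tar.gz') -> 'd41d8cd98f00b204e9800998ecf8427e' (md5 by default)
# compute_checksum('/tmp/foo.tar.gz', checksum_type='size') -> 1024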
def calc_block_checksum(path, algorithm):
"""Calculate a checksum of a file by reading it into blocks"""
# We pick a blocksize of 16 MB: it's a multiple of the internal
# blocksize of md5/sha1 (64) and gave the best speed results
try:
# in hashlib, blocksize is a class parameter
blocksize = algorithm.blocksize * 262144 # 2^18
except AttributeError, err:
blocksize = 16777216 # 2^24
_log.debug("Using blocksize %s for calculating the checksum" % blocksize)
try:
f = open(path, 'rb')
for block in iter(lambda: f.read(blocksize), r''):
algorithm.update(block)
f.close()
except IOError, err:
raise EasyBuildError("Failed to read %s: %s", path, err)
return algorithm.hexdigest()
def verify_checksum(path, checksums):
"""
Verify checksum of specified file.
    @param path: path of file to verify checksum of
    @param checksums: checksum value(s) (and type, optionally, default is MD5), e.g., 'af314', ('sha1', '5ec1b'), or a list of such values
"""
# if no checksum is provided, pretend checksum to be valid
if checksums is None:
return True
# make sure we have a list of checksums
if not isinstance(checksums, list):
checksums = [checksums]
for checksum in checksums:
if isinstance(checksum, basestring):
# default checksum type unless otherwise specified is MD5 (most common(?))
typ = DEFAULT_CHECKSUM
elif isinstance(checksum, tuple) and len(checksum) == 2:
typ, checksum = checksum
else:
raise EasyBuildError("Invalid checksum spec '%s', should be a string (MD5) or 2-tuple (type, value).",
checksum)
actual_checksum = compute_checksum(path, typ)
_log.debug("Computed %s checksum for %s: %s (correct checksum: %s)" % (typ, path, actual_checksum, checksum))
if actual_checksum != checksum:
return False
# if we land here, all checksums have been verified to be correct
return True
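# verify_checksum accepts several checksum specifications (values illustrative):
# verify_checksum(path, 'af314fc...') # bare string, interpreted as an MD5 digest
# verify_checksum(path, ('sha1', '5ec1b...')) # explicit (type, value) tuple
# verify_checksum(path, ['af314fc...', ('size', 1024)]) # list: every entry must match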
def find_base_dir():
"""
Try to locate a possible new base directory
- this is typically a single subdir, e.g. from untarring a tarball
- when extracting multiple tarballs in the same directory,
expect only the first one to give the correct path
"""
def get_local_dirs_purged():
# e.g. always purge the log directory
ignoreDirs = ["easybuild"]
lst = os.listdir(os.getcwd())
for ignDir in ignoreDirs:
if ignDir in lst:
lst.remove(ignDir)
return lst
lst = get_local_dirs_purged()
new_dir = os.getcwd()
while len(lst) == 1:
new_dir = os.path.join(os.getcwd(), lst[0])
if not os.path.isdir(new_dir):
break
try:
os.chdir(new_dir)
except OSError, err:
raise EasyBuildError("Changing to dir %s from current dir %s failed: %s", new_dir, os.getcwd(), err)
lst = get_local_dirs_purged()
# make sure it's a directory, and not a (single) file that was in a tarball for example
while not os.path.isdir(new_dir):
new_dir = os.path.dirname(new_dir)
_log.debug("Last dir list %s" % lst)
_log.debug("Possible new dir %s found" % new_dir)
return new_dir
def extract_cmd(filepath, overwrite=False):
"""
    Determines the file type of the file at filepath, returns extract cmd
- based on file suffix
- better to use Python magic?
"""
filename = os.path.basename(filepath)
exts = [x.lower() for x in filename.split('.')]
target = '.'.join(exts[:-1])
cmd_tmpl = None
# gzipped or gzipped tarball
if exts[-1] in ['gz']:
if exts[-2] in ['tar']:
# unzip .tar.gz in one go
cmd_tmpl = "tar xzf %(filepath)s"
else:
cmd_tmpl = "gunzip -c %(filepath)s > %(target)s"
elif exts[-1] in ['tgz', 'gtgz']:
cmd_tmpl = "tar xzf %(filepath)s"
# bzipped or bzipped tarball
elif exts[-1] in ['bz2']:
if exts[-2] in ['tar']:
cmd_tmpl = 'tar xjf %(filepath)s'
else:
cmd_tmpl = "bunzip2 %(filepath)s"
elif exts[-1] in ['tbz', 'tbz2', 'tb2']:
cmd_tmpl = "tar xjf %(filepath)s"
# xzipped or xzipped tarball
elif exts[-1] in ['xz']:
if exts[-2] in ['tar']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
else:
cmd_tmpl = "unxz %(filepath)s"
elif exts[-1] in ['txz']:
cmd_tmpl = "unxz %(filepath)s --stdout | tar x"
# tarball
elif exts[-1] in ['tar']:
cmd_tmpl = "tar xf %(filepath)s"
# zip file
elif exts[-1] in ['zip']:
if overwrite:
cmd_tmpl = "unzip -qq -o %(filepath)s"
else:
cmd_tmpl = "unzip -qq %(filepath)s"
if cmd_tmpl is None:
raise EasyBuildError('Unknown file type for file %s (%s)', filepath, exts)
return cmd_tmpl % {'filepath': filepath, 'target': target}
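# Examples of commands produced by extract_cmd (paths illustrative):
# extract_cmd('/tmp/foo.tar.gz') -> "tar xzf /tmp/foo.tar.gz"
# extract_cmd('/tmp/foo.gz') -> "gunzip -c /tmp/foo.gz > foo"
# extract_cmd('/tmp/foo.zip', overwrite=True) -> "unzip -qq -o /tmp/foo.zip"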
def det_patched_files(path=None, txt=None, omit_ab_prefix=False):
"""Determine list of patched files from a patch."""
# expected format: "+++ path/to/patched/file"
# also take into account the 'a/' or 'b/' prefix that may be used
patched_regex = re.compile(r"^\s*\+{3}\s+(?P<ab_prefix>[ab]/)?(?P<file>\S+)", re.M)
if path is not None:
try:
f = open(path, 'r')
txt = f.read()
f.close()
except IOError, err:
raise EasyBuildError("Failed to read patch %s: %s", path, err)
elif txt is None:
raise EasyBuildError("Either a file path or a string representing a patch should be supplied")
patched_files = []
for match in patched_regex.finditer(txt):
patched_file = match.group('file')
if not omit_ab_prefix and match.group('ab_prefix') is not None:
patched_file = match.group('ab_prefix') + patched_file
if patched_file in ['/dev/null']:
_log.debug("Ignoring patched file %s" % patched_file)
else:
patched_files.append(patched_file)
return patched_files
def guess_patch_level(patched_files, parent_dir):
"""Guess patch level based on list of patched files and specified directory."""
patch_level = None
for patched_file in patched_files:
# locate file by stripping of directories
tf2 = patched_file.split(os.path.sep)
n_paths = len(tf2)
path_found = False
level = None
for level in range(n_paths):
if os.path.isfile(os.path.join(parent_dir, *tf2[level:])):
path_found = True
break
if path_found:
patch_level = level
break
else:
_log.debug('No match found for %s, trying next patched file...' % patched_file)
return patch_level
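# Illustration of guess_patch_level (layout hypothetical): for a patch touching
# "a/src/main.c" with sources unpacked so that <parent_dir>/src/main.c exists,
# the first match is found after stripping one leading path component, so the
# guessed level is 1 (i.e. the patch would be applied with -p1).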
def apply_patch(patch_file, dest, fn=None, copy=False, level=None):
"""
Apply a patch to source code in directory dest
- assume unified diff created with "diff -ru old new"
"""
if not os.path.isfile(patch_file):
raise EasyBuildError("Can't find patch %s: no such file", patch_file)
if fn and not os.path.isfile(fn):
raise EasyBuildError("Can't patch file %s: no such file", fn)
if not os.path.isdir(dest):
raise EasyBuildError("Can't patch directory %s: no such directory", dest)
# copy missing files
if copy:
try:
shutil.copy2(patch_file, dest)
_log.debug("Copied patch %s to dir %s" % (patch_file, dest))
return 'ok'
except IOError, err:
raise EasyBuildError("Failed to copy %s to dir %s: %s", patch_file, dest, err)
# use absolute paths
apatch = os.path.abspath(patch_file)
adest = os.path.abspath(dest)
if not level:
# guess value for -p (patch level)
# - based on +++ lines
# - first +++ line that matches an existing file determines guessed level
# - we will try to match that level from current directory
patched_files = det_patched_files(path=apatch)
if not patched_files:
raise EasyBuildError("Can't guess patchlevel from patch %s: no testfile line found in patch", apatch)
patch_level = guess_patch_level(patched_files, adest)
if patch_level is None: # patch_level can also be 0 (zero), so don't use "not patch_level"
# no match
raise EasyBuildError("Can't determine patch level for patch %s from directory %s", patch_file, adest)
else:
_log.debug("Guessed patch level %d for patch %s" % (patch_level, patch_file))
else:
patch_level = level
_log.debug("Using specified patch level %d for patch %s" % (patch_level, patch_file))
try:
os.chdir(adest)
_log.debug("Changing to directory %s" % adest)
except OSError, err:
raise EasyBuildError("Can't change to directory %s: %s", adest, err)
patch_cmd = "patch -b -p%d -i %s" % (patch_level, apatch)
result = run.run_cmd(patch_cmd, simple=True)
if not result:
raise EasyBuildError("Patching with patch %s failed", patch_file)
return result
def modify_env(old, new):
"""NO LONGER SUPPORTED: use modify_env from easybuild.tools.environment instead"""
_log.nosupport("moved modify_env to easybuild.tools.environment", "2.0")
def convert_name(name, upper=False):
"""
Converts name so it can be used as variable name
"""
# no regexps
charmap = {
'+': 'plus',
'-': 'min'
}
for ch, new in charmap.items():
name = name.replace(ch, new)
if upper:
return name.upper()
else:
return name
def adjust_permissions(name, permissionBits, add=True, onlyfiles=False, onlydirs=False, recursive=True,
group_id=None, relative=True, ignore_errors=False):
"""
Add or remove (if add is False) permissionBits from all files (if onlydirs is False)
and directories (if onlyfiles is False) in path
"""
name = os.path.abspath(name)
if recursive:
_log.info("Adjusting permissions recursively for %s" % name)
allpaths = [name]
for root, dirs, files in os.walk(name):
paths = []
if not onlydirs:
paths += files
if not onlyfiles:
paths += dirs
for path in paths:
allpaths.append(os.path.join(root, path))
else:
_log.info("Adjusting permissions for %s" % name)
allpaths = [name]
failed_paths = []
fail_cnt = 0
for path in allpaths:
try:
if relative:
# relative permissions (add or remove)
perms = os.stat(path)[stat.ST_MODE]
if add:
os.chmod(path, perms | permissionBits)
else:
os.chmod(path, perms & ~permissionBits)
else:
# hard permissions bits (not relative)
os.chmod(path, permissionBits)
if group_id:
# only change the group id if the current gid is different from what we want
cur_gid = os.stat(path).st_gid
if not cur_gid == group_id:
_log.debug("Changing group id of %s to %s" % (path, group_id))
os.chown(path, -1, group_id)
else:
_log.debug("Group id of %s is already OK (%s)" % (path, group_id))
except OSError, err:
if ignore_errors:
# ignore errors while adjusting permissions (for example caused by bad links)
_log.info("Failed to chmod/chown %s (but ignoring it): %s" % (path, err))
fail_cnt += 1
else:
failed_paths.append(path)
if failed_paths:
raise EasyBuildError("Failed to chmod/chown several paths: %s (last error: %s)", failed_paths, err)
# we ignore some errors, but if there are too many, something is definitely wrong
fail_ratio = fail_cnt / float(len(allpaths))
max_fail_ratio = 0.5
if fail_ratio > max_fail_ratio:
raise EasyBuildError("%.2f%% of permissions/owner operations failed (more than %.2f%%), "
"something must be wrong...", 100 * fail_ratio, 100 * max_fail_ratio)
elif fail_cnt > 0:
_log.debug("%.2f%% of permissions/owner operations failed, ignoring that..." % (100 * fail_ratio))
def patch_perl_script_autoflush(path):
# patch Perl script to enable autoflush,
# so that e.g. run_cmd_qa receives all output to answer questions
txt = read_file(path)
origpath = "%s.eb.orig" % path
write_file(origpath, txt)
_log.debug("Patching Perl script %s for autoflush, original script copied to %s" % (path, origpath))
# force autoflush for Perl print buffer
lines = txt.split('\n')
newtxt = '\n'.join([
lines[0], # shebang line
"\nuse IO::Handle qw();",
"STDOUT->autoflush(1);\n", # extra newline to separate from actual script
] + lines[1:])
write_file(path, newtxt)
def mkdir(path, parents=False, set_gid=None, sticky=None):
"""
Create a directory
Directory is the path to create
@param parents: create parent directories if needed (mkdir -p)
@param set_gid: set group ID bit, to make subdirectories and files inherit group
@param sticky: set the sticky bit on this directory (a.k.a. the restricted deletion flag),
to prevent users from removing/renaming files in this directory
"""
if set_gid is None:
set_gid = build_option('set_gid_bit')
if sticky is None:
sticky = build_option('sticky_bit')
if not os.path.isabs(path):
path = os.path.abspath(path)
# exit early if path already exists
if not os.path.exists(path):
_log.info("Creating directory %s (parents: %s, set_gid: %s, sticky: %s)", path, parents, set_gid, sticky)
# set_gid and sticky bits are only set on new directories, so we need to determine the existing parent path
existing_parent_path = os.path.dirname(path)
try:
if parents:
# climb up until we hit an existing path or the empty string (for relative paths)
while existing_parent_path and not os.path.exists(existing_parent_path):
existing_parent_path = os.path.dirname(existing_parent_path)
os.makedirs(path)
else:
os.mkdir(path)
except OSError, err:
raise EasyBuildError("Failed to create directory %s: %s", path, err)
# set group ID and sticky bits, if desired
bits = 0
if set_gid:
bits |= stat.S_ISGID
if sticky:
bits |= stat.S_ISVTX
if bits:
try:
new_subdir = path[len(existing_parent_path):].lstrip(os.path.sep)
new_path = os.path.join(existing_parent_path, new_subdir.split(os.path.sep)[0])
adjust_permissions(new_path, bits, add=True, relative=True, recursive=True, onlydirs=True)
except OSError, err:
raise EasyBuildError("Failed to set groud ID/sticky bit: %s", err)
else:
_log.debug("Not creating existing path %s" % path)
def path_matches(path, paths):
"""Check whether given path matches any of the provided paths."""
if not os.path.exists(path):
return False
for somepath in paths:
if os.path.exists(somepath) and os.path.samefile(path, somepath):
return True
return False
def rmtree2(path, n=3):
"""Wrapper around shutil.rmtree to make it more robust when used on NFS mounted file systems."""
ok = False
for i in range(0, n):
try:
shutil.rmtree(path)
ok = True
break
except OSError, err:
_log.debug("Failed to remove path %s with shutil.rmtree at attempt %d: %s" % (path, n, err))
time.sleep(2)
if not ok:
raise EasyBuildError("Failed to remove path %s with shutil.rmtree, even after %d attempts.", path, n)
else:
_log.info("Path %s successfully removed." % path)
def move_logs(src_logfile, target_logfile):
"""Move log file(s)."""
mkdir(os.path.dirname(target_logfile), parents=True)
src_logfile_len = len(src_logfile)
try:
# there may be multiple log files, due to log rotation
app_logs = glob.glob('%s*' % src_logfile)
for app_log in app_logs:
# retain possible suffix
new_log_path = target_logfile + app_log[src_logfile_len:]
# retain old logs
if os.path.exists(new_log_path):
i = 0
oldlog_backup = "%s_%d" % (new_log_path, i)
while os.path.exists(oldlog_backup):
i += 1
oldlog_backup = "%s_%d" % (new_log_path, i)
shutil.move(new_log_path, oldlog_backup)
_log.info("Moved existing log file %s to %s" % (new_log_path, oldlog_backup))
# move log to target path
shutil.move(app_log, new_log_path)
_log.info("Moved log file %s to %s" % (src_logfile, new_log_path))
except (IOError, OSError), err:
raise EasyBuildError("Failed to move log file(s) %s* to new log file %s*: %s" ,
src_logfile, target_logfile, err)
def cleanup(logfile, tempdir, testing):
"""Cleanup the specified log file and the tmp directory"""
if not testing and logfile is not None:
try:
for log in glob.glob('%s*' % logfile):
os.remove(log)
except OSError, err:
raise EasyBuildError("Failed to remove log file(s) %s*: %s", logfile, err)
print_msg('temporary log file(s) %s* have been removed.' % (logfile), log=None, silent=testing)
if not testing and tempdir is not None:
try:
shutil.rmtree(tempdir, ignore_errors=True)
except OSError, err:
raise EasyBuildError("Failed to remove temporary directory %s: %s", tempdir, err)
print_msg('temporary directory %s has been removed.' % (tempdir), log=None, silent=testing)
def copytree(src, dst, symlinks=False, ignore=None):
"""
Copied from Lib/shutil.py in python 2.7, since we need this to work for python 2.4 as well
and this code can be improved...
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
class Error(EnvironmentError):
pass
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
_log.debug("copytree: skipping copy of %s" % ignored_names)
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))  # append a single (src, dst, reason) tuple, as above
if errors:
raise Error, errors
def encode_string(name):
"""
This encoding function handles funky software names ad infinitum, like:
example: '0_foo+0x0x#-$__'
becomes: '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
The intention is to have a robust escaping mechanism for names like c++, C# et al
It has been inspired by the concepts seen at, but in lowercase style:
* http://fossies.org/dox/netcdf-4.2.1.1/escapes_8c_source.html
* http://celldesigner.org/help/CDH_Species_01.html
* http://research.cs.berkeley.edu/project/sbp/darcsrepo-no-longer-updated/src/edu/berkeley/sbp/misc/ReflectiveWalker.java
and can be extended freely as per ISO/IEC 10646:2012 / Unicode 6.1 names:
* http://www.unicode.org/versions/Unicode6.1.0/
For readability of >2 words, it is suggested to use _CamelCase_ style.
So, yes, '_GreekSmallLetterEtaWithPsiliAndOxia_' *could* indeed be a fully
valid software name; software "electron" in the original spelling anyone? ;-)
"""
# do the character remapping, return same char by default
result = ''.join(map(lambda x: STRING_ENCODING_CHARMAP.get(x, x), name))
return result
def decode_string(name):
"""Decoding function to revert result of encode_string."""
result = name
for (char, escaped_char) in STRING_ENCODING_CHARMAP.items():
result = re.sub(escaped_char, char, result)
return result
def encode_class_name(name):
"""return encoded version of class name"""
return EASYBLOCK_CLASS_PREFIX + encode_string(name)
def decode_class_name(name):
"""Return decoded version of class name."""
if not name.startswith(EASYBLOCK_CLASS_PREFIX):
# name is not encoded, apparently
return name
else:
name = name[len(EASYBLOCK_CLASS_PREFIX):]
return decode_string(name)
def run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None):
"""NO LONGER SUPPORTED: use run_cmd from easybuild.tools.run instead"""
_log.nosupport("run_cmd was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None):
"""NO LONGER SUPPORTED: use run_cmd_qa from easybuild.tools.run instead"""
_log.nosupport("run_cmd_qa was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def parse_log_for_error(txt, regExp=None, stdout=True, msg=None):
"""NO LONGER SUPPORTED: use parse_log_for_error from easybuild.tools.run instead"""
_log.nosupport("parse_log_for_error was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def det_size(path):
"""
Determine total size of given filepath (in bytes).
"""
installsize = 0
try:
# walk install dir to determine total size
for (dirpath, _, filenames) in os.walk(path):
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
if os.path.exists(fullpath):
installsize += os.path.getsize(fullpath)
except OSError, err:
_log.warn("Could not determine install size: %s" % err)
return installsize
| mit | -7,227,287,605,544,151,000 | 34.362212 | 125 | 0.607225 | false |
RaminNietzsche/GuiNegar | setup.py | 1 | 1179 | from distutils.core import setup
from setuptools import find_packages
if __name__ == '__main__':
setup(
name = "gnegar",
version = "0.1.1",
author = "Ramin Najjarbashi",
author_email = "[email protected]",
#packages = find_packages() + ['lib'],
package_dir = {'':'lib'},
packages = [''],
scripts=['bin/gnegar'],
description = "Negar is a spell corrector and Persian text editor",
license = "GPL",
url = "http://RaminNietzsche.github.com/GuiNegar",
keywords=['spellcheck','Persian','editor','python'],
data_files = [('share/doc/gnegar',['README', 'COPYING', 'CHANGES']),
('share/man/man1/', ['man/gnegar.1']),
('share/applications/',['etc/gnegar.desktop']),
('share/icons/',['etc/gnegar.png']) ],
classifiers=[
'Development Status :: 0 - Beta',
'Intended Audience :: Persian Users',
'Intended Audience :: Persian Programmers',
'License :: OSI Approved :: GNU General Public License v3',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Editor',
'Topic :: Persian',
'Topic :: Spellcheck'
],
)
| gpl-3.0 | 4,409,665,534,196,861,400 | 31.75 | 75 | 0.585242 | false |
sadig/proteus-api | proteus/api/client.py | 1 | 7692 | # -*- coding: utf-8 -*-
###############################################################################
# python-proteus - Proteus IPAM Python Library
# Copyright (C) 2012 Stephan Adig <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
###############################################################################
#
# Additional Copyright and Trademark Information
#
# Proteus (tm) IP Addess Management (IPAM)
# is a product of BLUECAT Networks (tm) and is not OpenSource.
#
###############################################################################
import sys
try:
from suds.client import Client
except ImportError, e:
print "You don't have the python suds library installed."
sys.exit(1)
from constants import *
from proteus.objects import *
from dns import DNS
class ProteusClientApi(object):
""" Low Level Proteus SOAP Wrapper Class"""
def __init__(self, api_url=None, api_user=None, api_password=None):
"""Constructor
:Parameters:
- `api_url` : string
- `api_user` : string
- `api_password` : string
Example:
>>> from proteus.api import ProteusClientApi
>>> pc=ProteusClientApi(
'http://proteus.domain.tld/',
'username',
'password')
"""
self._api_url = api_url
self._api_user = api_user
self._api_password = api_password
self._client = None
self._is_connected = None
self._is_authenticated = None
def _connect(self):
"""
Establish connection to Proteus SOAP Service
"""
if self._client is not None:
raise Exception('Disconnect first')
if self._api_url[-1] != '/':
self._api_url += '/'
self._client = Client('%sServices/API?wsdl' % self._api_url)
self._client.set_options(location='%sServices/API' % self._api_url)
self._is_connected = True
def _disconnect(self):
"""
Disconnect from Proteus SOAP Service
"""
self._client = None
self._is_connected = False
def login(self):
"""
Connect and login
Example:
>>> from proteus.api import ProteusClientApi
>>> pc=ProteusClientApi(
'http://proteus.domain.tld/',
'username',
'password')
>>> pc.login()
"""
try:
self._connect()
self._client.service.login(self._api_user, self._api_password)
self._is_authenticated = True
return True
except Exception, e:
print e
return False
def logout(self):
"""
Logout and disconnect
Example:
>>> from proteus.api import ProteusClientApi
>>> pc=ProteusClientApi(
'http://proteus.domain.tld/',
'username',
'password')
>>> pc.login()
>>> ...
>>> pc.logout()
"""
try:
self._client.service.logout()
self._is_authenticated = False
self._disconnect()
return True
except Exception, e:
print e
def _get_entity_by_name(self, parent_id, entity_name, entity_type):
"""
Wrapper for Proteus SOAP API Method getEntityByName
:Parameters:
- `parent_id` : int
- `entity_name` : string
- `entity_type` : string [ use one of the TYPE_* constants from :py:mod:`proteus.api.constants` ]
:return:
APIEntity
"""
if entity_type not in ALL_TYPES:
raise Exception("Unknown Entity Type")
if self._is_connected:
try:
entity = self._client.service.getEntityByName(
parent_id,
entity_name,
entity_type
)
return entity
except Exception, e:
print e
return False
return None
def _get_entities(self, parent_id, entity_type, start=1, count=1):
"""
Get a list of Proteus Entities
:Parameters:
- `parent_id` : int
- `entity_type` : string [ use one of the TYPE_* constants from :py:mod:`proteus.api.constants` ]
- `start` : int [1-based]
- `count` : int
:return:
`APIEntityArray`
"""
if self._is_connected:
try:
entity = self._client.service.getEntities(
parent_id,
entity_type,
start,
count
)
return entity
except Exception, e:
print e
return False
return None
def is_valid_connection(self):
"""
Checks if the client is connected and authenticated
"""
if self._is_connected and self._is_authenticated:
return True
return False
class ProteusClient(ProteusClientApi):
"""
Usable Proteus Client
"""
def __init__(
self,
api_url=None,
api_user=None,
api_password=None,
config_name=None):
"""
:Parameters:
- `api_url` : string
- `api_user` : string
- `api_password` : string
- `config_name` : string
Example:
>>> from proteus.api import ProteusClientApi
>>> pc=ProteusClientApi(
'http://proteus.domain.tld/',
'username',
'password',
'proteus_configuration_object_name')
"""
super(ProteusClient, self).__init__(api_url, api_user, api_password)
self._config_name = config_name
self._configuration = None
self._get_configuration()
self._dns = DNS(self)
def _get_configuration(self):
if self.is_valid_connection():
try:
# parent_id is 0 for configuratioin objects
configuration = self._get_entity_by_name(
0,
self._config_name,
TYPE_CONFIGURATION
)
self._configuration = APIObject(
TypeRecord=configuration, client=self._client)
return True
except Exception, e:
print e
return False
return False
def get_dns(self):
return self._dns
DNS = property(get_dns, doc='DNS Class Property')
def get_configuration(self):
if self._configuration is None:
self._get_configuration()
return self._configuration
Configuration = property(get_configuration, doc='Configuration Property')
| lgpl-2.1 | -1,669,822,552,541,167,900 | 29.645418 | 109 | 0.5117 | false |
awadhn/robotframework-run-keyword-async | runKeywordAsync/runKeywordAsync.py | 1 | 3148 | import sys
import os
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.output.logger import LOGGER
class runKeywordAsync:
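# Typical usage from a Robot Framework suite (illustrative, not part of the
# original source):
#   ${handle}=     Run Keyword Async    Some Long Running Keyword    arg1
#   ${results}=    Wait Async All       timeout=120
# A single result can instead be fetched with Get Async Return and ${handle}.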
def __init__(self):
self._thread_pool = {}
self._last_thread_handle = 1
#self._robot_log_level = BuiltIn().get_variable_value("${LOG_LEVEL}")
def run_method_async(self, keyword, *args, **kwargs):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded_method(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def run_keyword_async(self, keyword, *args):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded(keyword, *args)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def wait_async_all(self, timeout=60):
timeout = int(timeout)
results = []
for thread in self._thread_pool:
try:
result = self._thread_pool[thread].result_queue.get(True, timeout)
results.append(result)
except:
#BuiltIn().set_log_level(self._robot_log_level)
for thread in self._thread_pool:
self._thread_pool[thread].terminate()
raise Exception("Process " + str(thread) + " Failed")
#BuiltIn().set_log_level(self._robot_log_level)
self._thread_pool = {}
self._last_thread_handle = 1
return results
def get_async_return(self, handle, timeout=60):
timeout = int(timeout)
if handle in self._thread_pool:
try:
result = self._thread_pool[handle].result_queue.get(True, timeout)
del self._thread_pool[handle]
#BuiltIn().set_log_level(self._robot_log_level)
return result
except:
raise Exception("Process " + str(handle) + " Failed")
else:
raise Exception("Passed Process id " + str(handle) + " is not a valid id")
def _threaded_method(self, keyword, *args, **kwargs):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args, **kwargs):
''' Calls the decorated function and puts the result in a queue '''
ret = BuiltIn().call_method(keyword, *args, **kwargs)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
th.result_queue = q
return th
def _threaded(self, keyword, *args):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args):
''' Calls the decorated function and puts the result in a queue '''
LOGGER.unregister_xml_logger()
ret = BuiltIn().run_keyword(keyword, *args)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args)
th.result_queue = q
return th
| mit | 6,771,067,669,261,232,000 | 34.772727 | 86 | 0.576239 | false |
zooniverse/panoptes-cli | panoptes_cli/commands/workflow.py | 1 | 5255 | import yaml
import click
from panoptes_cli.scripts.panoptes import cli
from panoptes_client import Workflow
@cli.group()
def workflow():
"""Contains commands for managing workflows."""
pass
@workflow.command()
@click.argument('workflow-id', required=False, type=int)
@click.option(
'--project-id',
'-p',
help="List workflows linked to the given project.",
required=False,
type=int,
)
@click.option(
'--quiet',
'-q',
is_flag=True,
help='Only print workflow IDs (omit names).',
)
def ls(workflow_id, project_id, quiet):
"""Lists workflow IDs and names."""
if workflow_id and not project_id:
workflow = Workflow.find(workflow_id)
if quiet:
click.echo(workflow.id)
else:
echo_workflow(workflow)
return
args = {}
if project_id:
args['project_id'] = project_id
if workflow_id:
args['workflow_id'] = workflow_id
workflows = Workflow.where(**args)
if quiet:
click.echo(" ".join([w.id for w in workflows]))
else:
for workflow in workflows:
echo_workflow(workflow)
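# Example invocation (illustrative, not part of the original source):
#   panoptes workflow ls --project-id 1234 --quiet
# prints only the IDs of the workflows linked to project 1234.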
@workflow.command()
@click.argument('workflow-id', required=True)
def info(workflow_id):
workflow = Workflow.find(workflow_id)
click.echo(yaml.dump(workflow.raw))
@workflow.command(name='retire-subjects')
@click.argument('workflow-id', type=int)
@click.argument('subject-ids', type=int, nargs=-1)
@click.option(
'--reason',
'-r',
help="The reason for retiring the subject.",
type=click.Choice((
'classification_count',
'flagged',
'blank',
'consensus',
'other'
)),
default='other'
)
def retire_subjects(workflow_id, subject_ids, reason):
"""
Retires subjects from the given workflow.
The subjects will no longer be served to volunteers for classification.
"""
workflow = Workflow.find(workflow_id)
workflow.retire_subjects(subject_ids, reason)
@workflow.command(name='add-subject-sets')
@click.argument('workflow-id', type=int)
@click.argument('subject-set-ids', type=int, nargs=-1)
def add_subject_sets(workflow_id, subject_set_ids):
"""Links existing subject sets to the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.add_subject_sets(subject_set_ids)
@workflow.command(name='remove-subject-sets')
@click.argument('workflow-id', type=int)
@click.argument('subject-set-ids', type=int, nargs=-1)
def remove_subject_sets(workflow_id, subject_set_ids):
"""Unlinks the given subject sets from the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.remove_subject_sets(subject_set_ids)
@workflow.command()
@click.argument('workflow-id', type=int)
def activate(workflow_id):
"""Activates the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.active = True
workflow.save()
@workflow.command()
@click.argument('workflow-id', type=int)
def deactivate(workflow_id):
"""Deactivates the given workflow."""
workflow = Workflow.find(workflow_id)
workflow.active = False
workflow.save()
@workflow.command(name="download-classifications")
@click.argument('workflow-id', required=True, type=int)
@click.argument('output-file', required=True, type=click.File('wb'))
@click.option(
'--generate',
'-g',
help="Generates a new export before downloading.",
is_flag=True
)
@click.option(
'--generate-timeout',
'-T',
help=(
"Time in seconds to wait for new export to be ready. Defaults to "
"unlimited. Has no effect unless --generate is given."
),
required=False,
type=int,
)
def download_classifications(
workflow_id,
output_file,
generate,
generate_timeout
):
"""
Downloads a workflow-specific classifications export for the given workflow.
OUTPUT_FILE will be overwritten if it already exists. Set OUTPUT_FILE to -
to output to stdout.
"""
workflow = Workflow.find(workflow_id)
if generate:
click.echo("Generating new export...", err=True)
export = workflow.get_export(
'classifications',
generate=generate,
wait_timeout=generate_timeout
)
with click.progressbar(
export.iter_content(chunk_size=1024),
label='Downloading',
length=(int(export.headers.get('content-length')) / 1024 + 1),
file=click.get_text_stream('stderr'),
) as chunks:
for chunk in chunks:
output_file.write(chunk)
@workflow.command()
@click.option(
'--force',
'-f',
is_flag=True,
help='Delete without asking for confirmation.',
)
@click.argument('workflow-ids', required=True, nargs=-1, type=int)
def delete(force, workflow_ids):
for workflow_id in workflow_ids:
workflow = Workflow.find(workflow_id)
if not force:
click.confirm(
'Delete workflow {} ({})?'.format(
workflow_id,
workflow.display_name,
),
abort=True,
)
workflow.delete()
def echo_workflow(workflow):
click.echo(
u'{} {}'.format(
workflow.id,
workflow.display_name
)
) | apache-2.0 | 8,488,461,908,351,289,000 | 24.028571 | 80 | 0.634634 | false |
noironetworks/group-based-policy | gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py | 1 | 3818 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from subprocess import call
from subprocess import PIPE
from subprocess import Popen
import sys
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class ConfigureIPtables(object):
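# Example rule blob accepted by this class (illustrative, not part of the
# original source):
#   {"rules": [{"action": "accept", "service": "tcp/80"},
#              {"action": "log", "service": "icmp"}]}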
def __init__(self, json_blob):
ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE)
output = ps.communicate()[0]
if "0" in output:
LOG.info("Enabling IP forwarding ...")
call(["sysctl", "-w", "net.ipv4.ip_forward=1"])
else:
LOG.info("IP forwarding already enabled")
try:
self.rules_json = jsonutils.loads(json_blob)
except ValueError:
sys.exit('Given json_blob is not a valid json')
def update_chain(self):
ps = Popen(["iptables", "-L"], stdout=PIPE)
output = ps.communicate()[0]
# check if chain is present if not create new chain
if "testchain" not in output:
LOG.info("Creating new chain ...")
call(["iptables", "-F"])
call(["iptables", "-N", "testchain"])
call(
["iptables", "-t", "filter",
"-A", "FORWARD", "-j", "testchain"])
call(["iptables", "-A", "FORWARD", "-j", "DROP"])
# flush chain of existing rules
call(["iptables", "-F", "testchain"])
# return
# Update chain with new rules
LOG.info("Updating chain with new rules ...")
count = 0
for rule in self.rules_json.get('rules'):
LOG.info("adding rule %(count)d", {'count': count})
try:
action_values = ["LOG", "ACCEPT"]
action = rule['action'].upper()
if action not in action_values:
sys.exit(
"Action %s is not valid action! Please enter "
"valid action (LOG or ACCEPT)" % (action))
service = rule['service'].split('/')
except KeyError as e:
sys.exit('KeyError: Rule does not have key %s' % (e))
if len(service) > 1:
ps = Popen(["iptables", "-A", "testchain", "-p", service[
0], "--dport", service[1], "-j", action],
stdout=PIPE)
else:
ps = Popen(
["iptables", "-A", "testchain", "-p", service[0],
"-j", action], stdout=PIPE)
output = ps.communicate()[0]
if output:
LOG.error("Unable to add rule to chain due to: %(msg)s",
{'msg': output})
count = count + 1
ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state",
"ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE)
output = ps.communicate()[0]
if output:
LOG.error("Unable to add rule to chain due to: %(output)s",
{'output': output})
def main():
if len(sys.argv) < 2:
sys.exit('Usage: %s json-blob' % sys.argv[0])
else:
json_blob = sys.argv[1]
test = ConfigureIPtables(json_blob)
test.update_chain()
if __name__ == "__main__":
main()
| apache-2.0 | 8,109,188,629,917,404,000 | 35.711538 | 78 | 0.529073 | false |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/indexes/multi.py | 1 | 99739 |
# pylint: disable=E1101,E1103,W0232
import datetime
import warnings
from functools import partial
from sys import getsizeof
import numpy as np
from pandas._libs import index as libindex, lib, Timestamp
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_platform_int,
is_object_dtype,
is_iterator,
is_list_like,
is_scalar)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.common import (_any_not_none,
_values_from_object,
is_bool_indexer,
is_null_slice,
is_true_slices)
import pandas.core.base as base
from pandas.util._decorators import (Appender, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.io.formats.printing import pprint_thing
from pandas.core.config import get_option
from pandas.core.indexes.base import (
Index, _ensure_index,
_get_na_value, InvalidIndexError,
_index_shared_docs)
from pandas.core.indexes.frozen import (
FrozenNDArray, FrozenList, _ensure_frozen)
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat)
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex
Index : The base pandas Index type
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True,
name=None, **kwargs):
# compat with Index
if name is not None:
names = name
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self, labels=None, levels=None):
"""
Parameters
----------
labels : optional list
Labels to check for validity. Defaults to current labels.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
* if length of levels and labels don't match or any label would
exceed level bounds
"""
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
labels = labels or self.labels
levels = levels or self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" %
([len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(
_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
self._verify_integrity(levels=new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(
_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, lev, lab in zip(level, self.levels, labels):
new_labels[l] = _ensure_frozen(
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
if verify_integrity:
self._verify_integrity(labels=new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if labels is None:
labels = deepcopy(self.labels)
else:
if levels is None:
levels = self.levels
if labels is None:
labels = self.labels
return MultiIndex(levels=levels, labels=labels, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(levels=[[] for _ in range(self.nlevels)],
labels=[[] for _ in range(self.nlevels)],
**kwargs)
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
if 'name' in kwargs:
kwargs['names'] = kwargs.pop('name', None)
# discards freq
kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, **kwargs)
return self.view()
@cache_readonly
def dtype(self):
return np.dtype('O')
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
def f(l):
return 'mixed' in l or 'string' in l or 'unicode' in l
return any([f(l) for l in self._inferred_type_levels])
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))
label_nbytes = sum((i.nbytes for i in self.labels))
names_nbytes = sum((getsizeof(i, objsize) for i in self.names))
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = [
('levels', ibase.default_pprint(self._levels,
max_seq_items=False)),
('labels', ibase.default_pprint(self._labels,
max_seq_items=False))]
if _any_not_none(*self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(fset=_set_names, fget=_get_names,
doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return sum(name == n for n in self.names) > 1
def _format_native_types(self, na_rep='nan', **kwargs):
new_levels = []
new_labels = []
# go through the levels and format them
for level, label in zip(self.levels, self.labels):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = (label == -1)
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
label = label.values()
label[mask] = nan_index
new_levels.append(level)
new_labels.append(label)
# reconstruct the multi-index
mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names,
sortorder=self.sortorder, verify_integrity=False)
return mi.values
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
indexer = self.labels[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
labels, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_labels, uniques = algos.factorize(indexer[mask],
sort=True)
labels = np.empty(len(indexer), dtype=indexer.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
grouper = level_index.take(labels)
return grouper, labels, level_index
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError('Too many levels: Index has only %d '
'levels, %d is not a valid level number' %
(self.nlevels, orig_level))
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@cache_readonly
def _engine(self):
# choose our engine based on our size
# the hashing based MultiIndex for larger
# sizes, and the MultiIndexObject for smaller
# xref: https://github.com/pandas-dev/pandas/pull/16324
l = len(self)
if l > 10000:
return libindex.MultiIndexHashEngine(lambda: self, l)
return libindex.MultiIndexObjectEngine(lambda: self.values, l)
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
# Need to box timestamps, etc.
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
taken = lev._box_values(algos.take_1d(lev._values, lab))
elif box:
taken = algos.take_1d(lev._box_values(lev._values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
taken = algos.take_1d(np.asarray(lev._values), lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
# fml
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_monotonic(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self.is_monotonic_increasing
@cache_readonly
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
# reversed() because lexsort() wants the most significant key last.
values = [self._get_level_values(i).values
for i in reversed(range(len(self.levels)))]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@cache_readonly
def _have_mixed_levels(self):
""" return a boolean list indicated if we have mixed levels """
return ['mixed' in l for l in self._inferred_type_levels]
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
from pandas.core.util.hashing import hash_tuples
return hash_tuples(self)
def _hashed_indexing_key(self, key):
"""
validate and return the hash for the provided key
*this is internal, for use by the cython routines*
Parameters
----------
key : string or tuple
Returns
-------
np.uint64
Notes
-----
we need to stringify if we have mixed levels
"""
from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
if not len(key) == self.nlevels:
raise KeyError
def f(k, stringify):
if stringify and not isinstance(k, compat.string_types):
k = str(k)
return k
key = tuple([f(k, stringify)
for k, stringify in zip(key, self._have_mixed_levels)])
return hash_tuple(key)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
# isna is not implemented for MultiIndex
raise NotImplementedError('isna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
nans = [label == -1 for label in self.labels]
if how == 'any':
indexer = np.any(nans, axis=0)
elif how == 'all':
indexer = np.all(nans, axis=0)
else:
raise ValueError("invalid how option: {0}".format(how))
new_labels = [label[~indexer] for label in self.labels]
return self.copy(labels=new_labels, deep=True)
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(new_values, index=new_index,
name=series.name).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return libindex.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if (isinstance(key, (datetime.datetime, np.datetime64)) or
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def _get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
Returns
-------
values : ndarray
"""
unique = self.levels[level]
labels = self.labels[level]
filled = algos.take_1d(unique._values, labels,
fill_value=unique._na_value)
values = unique._shallow_copy(filled)
return values
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
---------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [pprint_thing(na if isna(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in algos.take_1d(lev._values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels, start=int(names),
sentinel=sentinel)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
.. versionadded:: 0.20.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
"""
from pandas import DataFrame
result = DataFrame({(name or level):
self._get_level_values(level)
for name, level in
zip(self.names, range(len(self.levels)))},
copy=False)
if index:
result.index = self
return result
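    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #                                     names=['num', 'let'])
    #     mi.to_frame()             # DataFrame with columns 'num' and 'let',
    #                               # indexed by ``mi``
    #     mi.to_frame(index=False)  # same data, default integer index instead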
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [_ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
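    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [2, 1, 1]])
    #     mi.is_lexsorted()   # False: level 1 is unsorted within the 'a' group
    #     mi.lexsort_depth    # 1: only the first level is lexsorted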
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.categorical import _factorize_from_iterables
labels, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
names=names, verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
if names is None:
msg = 'Cannot infer number of levels from empty list'
raise TypeError(msg)
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels, labels, sortorder=sortorder, names=names)
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
        Create a new MultiIndex from the current one, with the items IN each
        level sorted monotonically. This does not actually make the entire
        MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> i._sort_levels_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if lev.is_monotonic:
new_levels.append(lev)
new_labels.append(lab)
continue
# indexer to reorder the levels
indexer = lev.argsort()
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(new_levels, new_labels,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def remove_unused_levels(self):
"""
        Create a new MultiIndex from the current one, removing unused levels,
        i.e. levels that are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
        >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
        >>> i
        MultiIndex(levels=[[0, 1], ['a', 'b']],
                   labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
labels=[[0, 0], [0, 1]])
"""
new_levels = []
new_labels = []
changed = False
for lev, lab in zip(self.levels, self.labels):
uniques = algos.unique(lab)
# nothing unused
if len(uniques) == len(lev):
new_levels.append(lev)
new_labels.append(lab)
continue
changed = True
# labels get mapped from uniques to 0:len(uniques)
label_mapping = np.zeros(len(lev))
label_mapping[uniques] = np.arange(len(uniques))
lab = label_mapping[lab]
# new levels are simple
lev = lev.take(uniques)
new_levels.append(lev)
new_labels.append(lab)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_labels(new_labels, validate=False)
return result
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
self.get_loc(key)
return True
except LookupError:
return False
contains = __contains__
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels=[lev for lev in self.levels],
labels=[label for label in self.labels],
sortorder=self.sortorder, names=list(self.names))
return ibase._new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if is_scalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.labels, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return MultiIndex(levels=self.levels, labels=taken,
names=self.names, verify_integrity=False)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=None):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.labels]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
masked.append(FrozenNDArray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.labels]
return taken
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
for o in other):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
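    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi1 = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
    #     mi2 = pd.MultiIndex.from_tuples([(3, 'c')])
    #     mi1.append(mi2)   # MultiIndex containing all three tuples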
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
nv.validate_repeat(args, kwargs)
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(repeats)
for label in self.labels], names=self.names,
sortorder=self.sortorder, verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
def drop(self, labels, level=None, errors='raise'):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
except Exception:
pass
inds = []
for label in labels:
try:
loc = self.get_loc(label)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
elif is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
'performance.',
PerformanceWarning,
stacklevel=3)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = 'unsupported indexer of type {}'.format(type(loc))
raise AssertionError(msg)
except KeyError:
if errors != 'ignore':
raise
return self.delete(inds)
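    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'), (2, 'a')])
    #     mi.drop([(1, 'b')])    # drop complete tuples
    #     mi.drop([1], level=0)  # drop every entry with 1 in level 0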
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~algos.isin(self.labels[i], values)
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
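    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #                                     names=['num', 'let'])
    #     mi.droplevel(0)       # -> Index(['a', 'b'], name='let')
    #     mi.droplevel('let')   # -> the numeric level, as an Index named 'num'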
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
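    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #                                     names=['num', 'let'])
    #     mi.swaplevel()              # 'let' becomes the outer level
    #     mi.swaplevel('num', 'let')  # same swap, using level names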
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
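    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b'], [True, False]],
    #                                     names=['x', 'y', 'z'])
    #     mi.reorder_levels(['z', 'x', 'y'])  # every level listed exactly once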
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def _get_labels_for_sorting(self):
"""
        We categorize our labels by using the
        available categories (all, not just observed),
        excluding any missing ones (-1); this is in preparation
        for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
from pandas.core.categorical import Categorical
def cats(label):
return np.arange(np.array(label).max() + 1 if len(label) else 0,
dtype=label.dtype)
return [Categorical.from_codes(label, cats(label), ordered=True)
for label in self.labels]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index
indexer : np.ndarray
Indices of output values in original index
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.labels[lev] for lev in level],
orders=ascending)
# level ordering
else:
labels = list(self.labels)
shape = list(self.levshape)
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = _ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
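    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_tuples([(2, 'b'), (1, 'a'), (2, 'a')])
    #     sorted_mi, indexer = mi.sortlevel(0)  # sort by the first level
    #     sorted_mi, indexer = mi.sortlevel(1, ascending=False)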
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
keyarr, kind=kind)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0],
tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % keyarr[mask])
return indexer, keyarr
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return _ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(target,
method=method,
limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
if method == 'pad' or method == 'backfill':
if tolerance is not None:
raise NotImplementedError("tolerance not implemented yet "
'for MultiIndex')
indexer = self._get_fill_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
# we may not compare equally because of hashing if we
# don't have the same dtypes
if self._inferred_type_levels != target._inferred_type_levels:
return Index(self.values).get_indexer(target.values)
indexer = self._engine.get_indexer(target)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super(MultiIndex, self).get_indexer_non_unique(target)
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
target = _ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
raise Exception("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lex-sorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
'Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
Examples
---------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
Notes
------
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
See also
--------
Index.get_loc : get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
def _maybe_str_to_time_stamp(key, lev):
if lev.is_all_dates and not isinstance(key, Timestamp):
try:
return Timestamp(key, tz=getattr(lev, 'tz', None))
except Exception:
pass
return key
key = _values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (self.slice_locs(lead_key, lead_key)
if lead_key else (0, len(self)))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning, stacklevel=10)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return (_maybe_to_slice(loc) if len(loc) != stop - start else
slice(start, stop))
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
if ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
See Also
---------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
all_dates = [(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)]
can_index_exactly = any(all_dates)
if (any([l.is_all_dates
for k, l in zip(key, self.levels)]) and
not can_index_exactly):
indexer = self.get_loc(key)
# we have a multiple selection here
if (not isinstance(indexer, slice) or
indexer.stop - indexer.start != 1):
return partial_selection(key, indexer)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(
_values_from_object(key)), None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
labels = self.labels[level]
def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
# given the inputs and the labels/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(labels):
                # We have an indexer that maps locations in the labels that we
                # have already selected (it is not an indexer for the entire
                # set); doing otherwise would be wasteful, so we only need to
                # examine locations that are in this set. The only magic here
                # is that the result is the mapping back to the set that we
                # have selected.
from pandas import Series
mapper = Series(indexer)
indexer = labels.take(_ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._values
else:
m = np.zeros(len(labels), dtype=bool)
m[np.in1d(labels, r,
assume_unique=Index(labels).is_unique)] = True
return m
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop,
key.step, kind='loc')
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need right-inclusive semantics here, like when we are
                # searching with a slice, so include stop + 1 (so that stop
                # itself is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if isinstance(loc, slice):
return loc
elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, seq):
"""
Get location for a given label/slice/list/mask or a sequence of such as
an array of integers.
Parameters
----------
seq : label/slice/list/mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
locs : array of integers suitable for passing to iloc
Examples
---------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b')
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']])
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')])
array([2], dtype=int64)
See also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
'lexsort depth {1}'
.format(true_slices, self.lexsort_depth))
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
"index")
r = r.nonzero()[0]
from .numeric import Int64Index
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i,
indexer=indexer))
indexers = (idxrs if indexers is None
else indexers | idxrs)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
from .numeric import Int64Index
# no matches we are done
return Int64Index([])._values
elif is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)),
indexer=indexer)
else:
# a single label
indexer = _update_indexer(_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]),
indexer=indexer)
# empty indexer
if indexer is None:
return Int64Index([])._values
return indexer._values
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
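    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
    #     mi.truncate(before=2)           # keep entries from first-level 2 on
    #     mi.truncate(before=1, after=2)  # keep the slice between both labels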
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
return array_equivalent(self._values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
slabels = self.labels[i]
slabels = slabels[slabels != -1]
svalues = algos.take_nd(np.asarray(self.levels[i]._values),
slabels, allow_fill=False)
olabels = other.labels[i]
olabels = olabels[olabels != -1]
ovalues = algos.take_nd(np.asarray(other.levels[i]._values),
olabels, allow_fill=False)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(svalues) == 0 and len(ovalues) == 0:
continue
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
>>> index.union(index2)
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
uniq_tuples = lib.fast_unique_multiple([self._values, other._values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
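    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi1 = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
    #     mi2 = pd.MultiIndex.from_tuples([(2, 'b'), (3, 'c')])
    #     mi1.union(mi2)   # all three distinct tuples, sorted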
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._values
other_tuples = other._values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self._values) - set(other._values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
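    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi1 = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
    #     mi2 = pd.MultiIndex.from_tuples([(2, 'b'), (3, 'c')])
    #     mi1.intersection(mi2)  # just (2, 'b')
    #     mi1.difference(mi2)    # just (1, 'a')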
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if not is_object_dtype(np.dtype(dtype)):
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
elif copy is True:
return self._shallow_copy()
return self
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, 'names'):
if len(other) == 0:
other = MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
return other, result_names
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
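    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
    #     mi.insert(1, (9, 'z'))  # item needs one value per level
    #     mi.insert(0, 7)         # a scalar is padded with '' for other levels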
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
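    # Illustrative usage sketch (added commentary, not part of the original
    # pandas source; assumes ``import pandas as pd``):
    #
    #     mi = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']])
    #     mi.isin([(1, 'a'), (3, 'c')])  # elementwise over complete tuples
    #     mi.isin(['a', 'z'], level=1)   # match against a single level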
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
| apache-2.0 | -8,210,810,238,779,362,000 | 34.406106 | 79 | 0.524389 | false |
UASLab/ImageAnalysis | video/archive/2a-gen-hud-overlay.py | 1 | 17297 | #!/usr/bin/python3
import argparse
import copy
import cv2
import math
import navpy
import numpy as np
import os
import re
import sys
from props import PropertyNode
import props_json
from aurauas.flightdata import flight_loader, flight_interp
sys.path.append('../lib')
import transformations
import correlate
import hud
import hud_glass
import features
# helpful constants
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
# default sizes of primatives
render_w = 1920
render_h = 1080
# configure
experimental_overlay = False
parser = argparse.ArgumentParser(description='correlate movie data to flight data.')
parser.add_argument('--flight', help='load specified aura flight log')
parser.add_argument('--movie', required=True, help='original movie')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='forward',
help='approximate camera mounting orientation')
parser.add_argument('--rot180', action='store_true')
parser.add_argument('--scale', type=float, default=1.0, help='scale input')
parser.add_argument('--scale-preview', type=float, default=0.25,
help='scale preview')
parser.add_argument('--alpha', type=float, default=0.7, help='hud alpha blend')
parser.add_argument('--resample-hz', type=float, default=60.0,
help='resample rate (hz)')
parser.add_argument('--start-time', type=float, help='fast forward to this flight log time before beginning movie render.')
parser.add_argument('--time-shift', type=float, help='skip autocorrelation and use this offset time')
parser.add_argument('--plot', action='store_true', help='Plot stuff at the end of the run')
parser.add_argument('--auto-switch', choices=['old', 'new', 'none', 'on'], default='new', help='auto/manual switch logic helper')
parser.add_argument('--airspeed-units', choices=['kt', 'mps'], default='kt', help='display units for airspeed')
parser.add_argument('--altitude-units', choices=['ft', 'm'], default='ft', help='display units for airspeed')
parser.add_argument('--aileron-scale', type=float, default=1.0, help='useful for reversing aileron in display')
parser.add_argument('--elevator-scale', type=float, default=1.0, help='useful for reversing elevator in display')
parser.add_argument('--rudder-scale', type=float, default=1.0, help='useful for reversing rudder in display')
parser.add_argument('--flight-track-seconds', type=float, default=600.0, help='how many seconds of flight track to draw')
parser.add_argument('--features', help='feature database')
args = parser.parse_args()
counter = 0
# pathname work
abspath = os.path.abspath(args.movie)
filename, ext = os.path.splitext(abspath)
dirname = os.path.dirname(args.movie)
movie_log = filename + ".csv"
local_config = dirname + "/camera.json"
# combinations that seem to work on linux
# ext = avi, fourcc = MJPG
# ext = avi, fourcc = XVID
# ext = m4v (was mov), fourcc = MP4V
ext = 'avi'
tmp_movie = filename + "_tmp." + ext
output_movie = filename + "_hud.mov"
config = PropertyNode()
if args.camera:
# seed the camera calibration and distortion coefficients from a
# known camera config
print('Setting camera config from:', args.camera)
props_json.load(args.camera, config)
config.setString('name', args.camera)
props_json.save(local_config, config)
elif os.path.exists(local_config):
# load local config file if it exists
props_json.load(local_config, config)
name = config.getString('name')
config.setLen('mount_ypr', 3, 0.0)
cam_yaw = config.getFloatEnum('mount_ypr', 0)
cam_pitch = config.getFloatEnum('mount_ypr', 1)
cam_roll = config.getFloatEnum('mount_ypr', 2)
K_list = []
for i in range(9):
K_list.append( config.getFloatEnum('K', i) )
K = np.copy(np.array(K_list)).reshape(3,3)
dist = []
for i in range(5):
dist.append( config.getFloatEnum("dist_coeffs", i) )
print('Camera:', name)
print('K:\n', K)
print('dist:', dist)
# adjust K for output scale.
K = K * args.scale
K[2,2] = 1.0
if 'recalibrate' in args:
recal_file = args.recalibrate
else:
recal_file = None
data, flight_format = flight_loader.load(args.flight, recal_file)
print("imu records:", len(data['imu']))
print("gps records:", len(data['gps']))
if 'air' in data:
print("airdata records:", len(data['air']))
print("filter records:", len(data['filter']))
if 'pilot' in data:
print("pilot records:", len(data['pilot']))
if 'act' in data:
print("act records:", len(data['act']))
if len(data['imu']) == 0 and len(data['gps']) == 0:
print("not enough data loaded to continue.")
quit()
interp = flight_interp.FlightInterpolate()
interp.build(data)
time_shift, flight_min, flight_max = \
correlate.sync_clocks(data, interp, movie_log, hz=args.resample_hz,
cam_mount=args.cam_mount,
force_time_shift=args.time_shift, plot=args.plot)
# quick estimate ground elevation
sum = 0.0
count = 0
for f in data['filter']:
if interp.air_speed(f.time) < 5.0:
sum += f.alt
count += 1
if count > 0:
ground_m = sum / float(count)
else:
ground_m = data['filter'][0].alt
print("ground est:", ground_m)
# overlay hud(s)
hud1 = hud_glass.HUD(K)
hud2 = hud.HUD(K)
# these are fixed tranforms between ned and camera reference systems
proj2ned = np.array( [[0, 0, 1], [1, 0, 0], [0, 1, 0]],
dtype=float )
ned2proj = np.linalg.inv(proj2ned)
#cam_ypr = [-3.0, -12.0, -3.0] # yaw, pitch, roll
#ref = [44.7260320000, -93.0771072000, 0]
ref = [ data['gps'][0].lat, data['gps'][0].lon, 0.0 ]
hud1.set_ned_ref(data['gps'][0].lat, data['gps'][0].lon)
hud2.set_ned_ref(data['gps'][0].lat, data['gps'][0].lon)
print('ned ref:', ref)
print('temporarily disabling airport loading')
hud1.load_airports()
hud1.set_ground_m(ground_m)
hud2.set_ground_m(ground_m)
if args.features:
feats = features.load(args.features, ref)
hud1.update_features(feats)
else:
feats = []
print("Opening ", args.movie)
try:
capture = cv2.VideoCapture(args.movie)
except:
print("error opening video")
capture.read()
counter += 1
print("ok reading first frame")
fps = capture.get(cv2.CAP_PROP_FPS)
print("fps = %.2f" % fps)
fourcc = int(capture.get(cv2.CAP_PROP_FOURCC))
print("input fourcc: ", fourcc)
w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH) * args.scale )
h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * args.scale )
hud1.set_render_size(w, h)
hud2.set_render_size(w, h)
#outfourcc = cv2.cv.CV_FOURCC('M', 'J', 'P', 'G')
#outfourcc = cv2.cv.CV_FOURCC('H', '2', '6', '4')
#outfourcc = cv2.cv.CV_FOURCC('X', '2', '6', '4')
#outfourcc = cv2.cv.CV_FOURCC('X', 'V', 'I', 'D')
#outfourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
#outfourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
#outfourcc = cv2.VideoWriter_fourcc(*'XVID')
#outfourcc = cv2.VideoWriter_fourcc(*'X264'); # ext = 'mkv'
#outfourcc = cv2.VideoWriter_fourcc(*'mp4v'); # ext = 'm4v'
outfourcc = cv2.VideoWriter_fourcc(*'MJPG'); # ext = 'avi'
print(outfourcc, fps, w, h)
output = cv2.VideoWriter(tmp_movie, outfourcc, fps, (w, h), isColor=True)
last_time = 0.0
# set primative sizes based on rendered resolution.
size = math.sqrt(h*h + w*w)
hud1.set_line_width( int(round(size/1000.0)) )
hud1.set_font_size( size / 1400.0 )
hud1.set_color( hud.green2 )
hud1.set_units( args.airspeed_units, args.altitude_units)
hud2.set_line_width( int(round(size/1000.0)) )
hud2.set_font_size( size / 1400.0 )
hud2.set_color( hud.red2 )
hud2.set_units( args.airspeed_units, args.altitude_units)
filt_alt = None
if time_shift > 0:
# catch up the flight path history (in case the movie starts
# mid-flight.) Note: flight_min is the starting time of the filter data
# set.
print('seeding flight track ...')
for time in np.arange(flight_min, time_shift, 1.0 / float(fps)):
lat_deg = float(interp.filter_lat(time))*r2d
lon_deg = float(interp.filter_lon(time))*r2d
#altitude_m = float(interp.air_true_alt(time))
altitude_m = float(interp.filter_alt(time))
ned = navpy.lla2ned( lat_deg, lon_deg, altitude_m,
ref[0], ref[1], ref[2] )
hud1.update_time(time, interp.gps_unixtime(time))
hud1.update_ned(ned, args.flight_track_seconds)
shift_mod_hack = False
while True:
ret, frame = capture.read()
if not ret:
# no frame
print("no more frames:")
break
if frame is None:
print("Skipping bad frame ...")
continue
if args.rot180:
frame = np.rot90(frame)
frame = np.rot90(frame)
time = float(counter) / fps + time_shift
print("frame: ", counter, "%.3f" % time, 'time shift:', time_shift)
counter += 1
if args.start_time and time < args.start_time:
continue
vn = interp.filter_vn(time)
ve = interp.filter_ve(time)
vd = interp.filter_vd(time)
#yaw_rad = interp.filter_yaw(time)*d2r
psix = interp.filter_psix(time)
psiy = interp.filter_psiy(time)
yaw_rad = math.atan2(psiy, psix)
pitch_rad = interp.filter_the(time)
roll_rad = interp.filter_phi(time)
lat_deg = interp.filter_lat(time)*r2d
lon_deg = interp.filter_lon(time)*r2d
#altitude_m = float(interp.air_true_alt(time))
altitude_m = interp.filter_alt(time)
    if filt_alt is None:
filt_alt = altitude_m
else:
filt_alt = 0.95 * filt_alt + 0.05 * altitude_m
if interp.air_speed:
airspeed_kt = interp.air_speed(time)
else:
airspeed_kt = 0.0
if interp.air_wind_dir:
wind_deg = interp.air_wind_dir(time)
wind_kt = interp.air_wind_speed(time)
if interp.air_alpha and interp.air_beta:
alpha_rad = float(interp.air_alpha(time))*d2r
beta_rad = float(interp.air_beta(time))*d2r
#print alpha_rad, beta_rad
else:
alpha_rad = None
beta_rad = None
#print 'no alpha/beta'
if interp.ap_hdgx:
ap_hdgx = float(interp.ap_hdgx(time))
ap_hdgy = float(interp.ap_hdgy(time))
ap_hdg = math.atan2(ap_hdgy, ap_hdgx)*r2d
ap_roll = float(interp.ap_roll(time))
ap_pitch = float(interp.ap_pitch(time))
ap_speed = float(interp.ap_speed(time))
ap_alt_ft = float(interp.ap_alt(time))
if interp.pilot_ail:
pilot_ail = float(interp.pilot_ail(time)) * args.aileron_scale
pilot_ele = float(interp.pilot_ele(time)) * args.elevator_scale
pilot_thr = float(interp.pilot_thr(time))
pilot_rud = float(interp.pilot_rud(time)) * args.rudder_scale
auto_switch = float(interp.pilot_auto(time))
else:
auto_switch = 0
if interp.act_ail:
act_ail = float(interp.act_ail(time)) * args.aileron_scale
act_ele = float(interp.act_ele(time)) * args.elevator_scale
act_thr = float(interp.act_thr(time))
act_rud = float(interp.act_rud(time)) * args.rudder_scale
if args.auto_switch == 'none':
flight_mode = 'manual'
elif (args.auto_switch == 'new' and auto_switch < 0) or (args.auto_switch == 'old' and auto_switch > 0):
flight_mode = 'manual'
elif args.auto_switch == 'on':
flight_mode = 'auto'
else:
flight_mode = 'auto'
if interp.excite_mode:
excite_mode = float(interp.excite_mode(time))
test_index = float(interp.test_index(time))
body2cam = transformations.quaternion_from_euler( cam_yaw * d2r,
cam_pitch * d2r,
cam_roll * d2r,
'rzyx')
    # this function modifies the parameters you pass in, so avoid getting
    # our data changed out from under us by forcing copies (a = b wasn't
    # sufficient, but a = float(b) forces a copy).
tmp_yaw = float(yaw_rad)
tmp_pitch = float(pitch_rad)
tmp_roll = float(roll_rad)
ned2body = transformations.quaternion_from_euler(tmp_yaw,
tmp_pitch,
tmp_roll,
'rzyx')
body2ned = transformations.quaternion_inverse(ned2body)
#print 'ned2body(q):', ned2body
ned2cam_q = transformations.quaternion_multiply(ned2body, body2cam)
ned2cam = np.matrix(transformations.quaternion_matrix(np.array(ned2cam_q))[:3,:3]).T
#print 'ned2cam:', ned2cam
R = ned2proj.dot( ned2cam )
rvec, jac = cv2.Rodrigues(R)
ned = navpy.lla2ned( lat_deg, lon_deg, filt_alt,
ref[0], ref[1], ref[2] )
#print 'ned:', ned
tvec = -np.matrix(R) * np.matrix(ned).T
R, jac = cv2.Rodrigues(rvec)
# is this R the same as the earlier R?
PROJ = np.concatenate((R, tvec), axis=1)
#print 'PROJ:', PROJ
#print lat_deg, lon_deg, altitude, ref[0], ref[1], ref[2]
#print ned
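    # Added commentary (not part of the original script): R = ned2proj.dot(ned2cam)
    # rotates NED vectors into the camera/projection frame, and tvec = -R * ned
    # places the camera at the aircraft position, so a world point p given in
    # NED relative to `ref` maps to camera coordinates as p_cam = R.dot(p) + tvec.
    # PROJ = [R | tvec] is the 3x4 pose matrix that, together with the camera
    # matrix K, lets the HUD project NED points into the undistorted frame.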
method = cv2.INTER_AREA
#method = cv2.INTER_LANCZOS4
frame_scale = cv2.resize(frame, (0,0), fx=args.scale, fy=args.scale,
interpolation=method)
frame_undist = cv2.undistort(frame_scale, K, np.array(dist))
# Create hud draw space
if not experimental_overlay:
hud1_frame = frame_undist.copy()
else:
hud1_frame = np.zeros((frame_undist.shape), np.uint8)
hud1.update_time(time, interp.gps_unixtime(time))
if 'event' in data:
hud1.update_events(data['event'])
if interp.excite_mode:
hud1.update_test_index(excite_mode, test_index)
hud1.update_proj(PROJ)
hud1.update_cam_att(cam_yaw, cam_pitch, cam_roll)
hud1.update_ned(ned, args.flight_track_seconds)
hud1.update_lla([lat_deg, lon_deg, altitude_m])
hud1.update_vel(vn, ve, vd)
hud1.update_att_rad(roll_rad, pitch_rad, yaw_rad)
if interp.air_wind_dir:
hud1.update_airdata(airspeed_kt, altitude_m, wind_deg, wind_kt, alpha_rad, beta_rad)
else:
hud1.update_airdata(airspeed_kt, altitude_m)
if interp.ap_hdgx:
hud1.update_ap(flight_mode, ap_roll, ap_pitch, ap_hdg,
ap_speed, ap_alt_ft)
else:
hud1.update_ap(flight_mode, 0.0, 0.0, 0.0, 0.0, 0.0)
if interp.pilot_ail:
hud1.update_pilot(pilot_ail, pilot_ele, pilot_thr, pilot_rud)
if interp.act_ail:
hud1.update_act(act_ail, act_ele, act_thr, act_rud)
if time >= flight_min and time <= flight_max:
# only draw hud for time range when we have actual flight data
hud1.update_frame(hud1_frame)
hud1.draw()
if not experimental_overlay:
# weighted add of the HUD frame with the original frame to
# emulate alpha blending
alpha = args.alpha
if alpha < 0: alpha = 0
if alpha > 1: alpha = 1
cv2.addWeighted(hud1_frame, alpha, frame_undist, 1 - alpha, 0, hud1_frame)
else:
# Now create a mask of hud and create its inverse mask also
tmp = cv2.cvtColor(hud1_frame, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(tmp, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the hud from the original image
tmp_bg = cv2.bitwise_and(frame_undist, frame_undist, mask=mask_inv)
# Put hud onto the main image
hud1_frame = cv2.add(tmp_bg, hud1_frame)
# cv2.imshow('hud', hud1_frame)
cv2.imshow('hud', cv2.resize(hud1_frame, None, fx=args.scale_preview, fy=args.scale_preview))
output.write(hud1_frame)
key = cv2.waitKeyEx(5)
if key == -1:
# no key press
continue
print('key:', key)
if key == 27:
break
elif key == ord('y'):
if shift_mod_hack:
cam_yaw -= 0.5
else:
cam_yaw += 0.5
config.setFloatEnum('mount_ypr', 0, cam_yaw)
props_json.save(local_config, config)
shift_mod_hack = False
elif key == ord('p'):
if shift_mod_hack:
cam_pitch -= 0.5
else:
cam_pitch += 0.5
config.setFloatEnum('mount_ypr', 1, cam_pitch)
props_json.save(local_config, config)
shift_mod_hack = False
elif key == ord('r'):
if shift_mod_hack:
cam_roll += 0.5
else:
cam_roll -= 0.5
config.setFloatEnum('mount_ypr', 2, cam_roll)
props_json.save(local_config, config)
shift_mod_hack = False
elif key == ord('-'):
time_shift -= 1.0/60.0
shift_mod_hack = False
elif key == ord('+'):
time_shift += 1.0/60.0
shift_mod_hack = False
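            # 65505/65506 are the left/right Shift key codes returned by waitKeyEx;
            # remember the modifier so the next adjustment key moves in the opposite direction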
elif key == 65505 or key == 65506:
shift_mod_hack = True
output.release()
cv2.destroyAllWindows()
# now run ffmpeg as an external command to combine original audio
# track with new overlay video
# ex: ffmpeg -i opencv.avi -i orig.mov -c copy -map 0:v -map 1:a final.avi
from subprocess import call
result = call(["ffmpeg", "-i", tmp_movie, "-i", args.movie, "-c", "copy", "-map", "0:v", "-map", "1:a", output_movie])
print("ffmpeg result code:", result)
if result == 0:
print("removing temp movie:", tmp_movie)
os.remove(tmp_movie)
print("output movie:", output_movie)
| mit | 1,616,879,827,897,714,000 | 34.085193 | 129 | 0.623576 | false |
ricardoy/coccimorph | coccimorph/content.py | 1 | 23248 | import math
import numpy as np
import os
import pandas as pd
from coccimorph.aux import load_image
import cv2
RED = (0, 0, 255)
fowl_species = [
'E. acervulina',
'E. maxima',
'E. brunetti',
'E. mitis',
'E. praecox',
'E. tenella',
'E. necatrix'
]
rabbit_species = [
'E. coecicola',
'E. exigua',
'E. flavescens',
'E. intestinalis',
'E. irresidua',
'E. magna',
'E. media',
'E. perforans',
'E. piriformis',
'E. stiedai',
'E. vejdovskyi'
]
basedir = os.path.dirname(__file__) + '/../prototypes'
def dilate(ima):
    '''
    Morphological dilation of the binary matrix ima using
    as default the structuring element (SE)
    [0 1 0
     1 1 1
     0 1 0]
    :param ima: a binary array
    :return: the dilated binary array (modified in place)
    '''
dx, dy = ima.shape
se = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
ima_temp = np.zeros((dx, 500), dtype=np.int)
for m in range(dx):
for n in range(dy):
ima_temp[m, n] = ima[m, n]
for m in range(1, dx - 1):
for n in range(1, dy - 1):
if ima_temp[m, n] == 1:
for i in range(3):
for j in range(3):
mw = m - 1
nw = n - 1
if ima[mw + i, nw + j] == 0:
ima[mw + i, nw + j] = ima[mw + i, nw + j] or se[i][j]
return ima
def erode(ima: np.ndarray):
dx, dy = ima.shape
ima_temp = np.zeros((dx, 500), dtype=np.int)
for m in range(dx):
for n in range(dy):
ima_temp[m, n] = ima[m, n]
for m in range(1, dx - 1):
for n in range(1, dy - 1):
if ima_temp[m, n] == 1:
aux = 1
aux *= ima_temp[m, n]
aux *= ima_temp[m - 1, n]
aux *= ima_temp[m + 1, n]
aux *= ima_temp[m, n - 1]
aux *= ima_temp[m, n + 1]
ima[m, n] = aux
for i in range(dx):
ima[i, 0] = 0
ima[i, dy - 1] = 0
for i in range(dy):
ima[0, i] = 0
ima[dx - 1, i] = 0
return ima
class FeatureExtractor:
def __init__(self, filename, scale):
self.img = load_image(filename, scale)
self.height, self.width, _ = self.img.shape
self.img_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.ima = np.zeros((self.height, self.width), dtype=np.int)
self.vx = []
self.vy = []
self.wEnt = None
self.obj_entropy = 0.0
self.obj_size = 0.0
self.mcc = None
def set_co_matrix(self, d: int):
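        """
        Builds the grey-level co-occurrence matrix of the object pixels for a
        horizontal displacement d (in both directions), normalised by the
        number of co-occurrences, and stores it in self.mcc.
        """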
aux_mcc = np.zeros((256, 256), dtype=np.int)
ro = 0
for x in range(self.height):
for y in range(self.width-d):
if self.ima[x, y] > 0 and self.ima[x, y + d] > 0:
# if aux_mcc[self.ima[x, y], self.ima[x, y + d]] > 0:
# print(self.ima[x, y], self.ima[x, y + d], aux_mcc[self.ima[x, y], self.ima[x, y + d]])
aux_mcc[self.ima[x, y], self.ima[x, y + d]] += 1
ro += 1
for x in range(self.height):
y = self.width-1
while y > d - 1:
if self.ima[x, y] > 0 and self.ima[x, y - d] > 0:
# if aux_mcc[self.ima[x, y], self.ima[x, y - d]] > 0:
# print(self.ima[x, y], self.ima[x, y - d], aux_mcc[self.ima[x, y], self.ima[x, y - d]])
aux_mcc[self.ima[x, y], self.ima[x, y - d]] += 1
ro += 1
y -= 1
# print('ro', ro)
# self.ima.tofile('/tmp/ima_novo')
self.mcc = aux_mcc / float(ro)
# with open('/tmp/mcc_novo', 'w') as fh:
# for i in range(255):
#
# for j in range(255):
# fh.write('%.14f ' % (self.mcc[i][j]))
#
# fh.write('%.14f' % (self.mcc[i][255]))
# fh.write('\n')
#
# print('soma total mcc', np.sum(aux_mcc), np.std(self.mcc) ** 2)
def mcc_asm(self):
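        """Angular second moment (energy) of the co-occurrence matrix."""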
return np.sum(np.power(self.mcc, 2))
def mcc_con(self):
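        """Contrast of the co-occurrence matrix."""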
sm = 0.0
for i in range(256):
for j in range(256):
sm += self.mcc[i, j]*(i-j)*(i-j)
return sm
def mcc_idf(self):
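        """Inverse difference moment (local homogeneity) of the co-occurrence matrix."""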
sm = 0.0
for i in range(256):
for j in range(256):
sm += self.mcc[i, j] / float(1 + (i-j)*(i-j))
return sm
def mcc_ent(self):
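        """Entropy-based feature of the co-occurrence matrix (squared and halved, as used by the original feature set)."""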
sm = 0.0
for i in range(256):
for j in range(256):
if self.mcc[i, j] > 0:
sm += self.mcc[i, j]*np.log(self.mcc[i, j])
return sm * sm / 2.
# sm = np.sum(self.mcc * np.log(self.mcc))
# return sm * sm / 2.0
def eigens(self):
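        """
        Runs a PCA on the object pixel coordinates: projects the points onto
        the eigenvectors of their covariance matrix to measure the major and
        minor diameters, then mirrors the object about each principal axis to
        compute the two symmetry indices.
        """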
c = np.zeros(4, dtype=np.float)
mean_x = np.average(self.vx)
mean_y = np.average(self.vy)
sum0 = 0.
sum1 = 0.
sum2 = 0.
sum3 = 0.
for i in range(len(self.vx)):
sum0 += (self.vx[i] - mean_x) * (self.vx[i] - mean_x)
sum1 += (self.vx[i] - mean_x) * (self.vy[i] - mean_y)
sum2 += (self.vy[i] - mean_y) * (self.vx[i] - mean_x)
sum3 += (self.vy[i] - mean_y) * (self.vy[i] - mean_y)
n = len(self.vx)
c[0] = sum0/n
c[1] = sum1/n
c[2] = sum2/n
c[3] = sum3/n
k = np.reshape(c, (-1, 2))
# print('k', k)
# compute eigen vectors and eigen values
eigenvalues, eigenvectors = np.linalg.eigh(k)
# print('autovalores', eigenvalues)
#
# print('eigenvectors\n', eigenvectors)
evec_inv = np.linalg.inv(eigenvectors)
# transform to new space using inverse matrix of eigen vectors
vx1 = np.zeros(n, dtype=np.float)
vy1 = np.zeros(n, dtype=np.float)
# print('inversa: ', evec_inv)
for i in range(n):
vx_w = evec_inv[0, 0] * self.vx[i] + evec_inv[0, 1] * self.vy[i]
vy_w = evec_inv[1, 0] * self.vx[i] + evec_inv[1, 1] * self.vy[i]
vx1[i] = vx_w
vy1[i] = vy_w
# vx1 = -1 * vx1
# vy1 = -1 * vy1
# with open('/tmp/novo', 'w') as fh:
# fh.write('valor de vx1\n')
# for blah in vx1:
# fh.write(str(blah))
# fh.write('\n')
# exit()
meanvx1 = np.average(vx1)
meanvy1 = np.average(vy1)
vx1 = vx1 - meanvx1
vy1 = vy1 - meanvy1
vx2 = np.copy(vx1)
vy2 = np.copy(vy1)
# searching for diameters
# highX = np.max(vx1)
# lessX = np.min(vx1)
# highY = np.max(vy1)
# lessY = np.min(vy1)
highX = float('-Inf')
lessX = float('Inf')
highY = float('-Inf')
lessY = float('Inf')
for i in range(len(self.vx)):
if int(vx1[i]) == 0 and vy1[i] > highY:
highY = vy1[i]
if int(vx1[i]) == 0 and vy1[i] < lessY:
lessY = vy1[i]
if int(vy1[i]) == 0 and vx1[i] > highX:
highX = vx1[i]
if int(vy1[i]) == 0 and vx1[i] < lessX:
lessX = vx1[i]
# print('meanvx1', meanvx1, 'meanvy1', meanvy1)
# print('highX', highX, 'lessX', lessX)
# print('highY', highY, 'lessY', lessY)
# print('high diameter', (highY - lessY + 1))
self.high_diameter = highY - lessY + 1
self.less_diameter = highX - lessX + 1
        # reflect according to the principal components
if np.abs(int(eigenvalues[0])) > np.abs(int(eigenvalues[1])):
for i in range(n):
vy1[i] = -1. * vy1[i]
vx2[i] = -1. * vx2[i]
else:
for i in range(n):
vx1[i] = -1. * vx1[i]
vy2[i] = -1. * vy2[i]
# translate to original localization
vx1 = vx1 + meanvx1
vy1 = vy1 + meanvy1
vx2 = vx2 + meanvx1
vy2 = vy2 + meanvy1
# return to original base
for i in range(n):
vx_w = eigenvectors[0,0]*vx1[i] + eigenvectors[0,1]*vy1[i]
vy_w = eigenvectors[1,0]*vx1[i] + eigenvectors[1,1]*vy1[i]
vx1[i] = vx_w
vy1[i] = vy_w
vx_w = eigenvectors[0,0]*vx2[i] + eigenvectors[0,1]*vy2[i]
vy_w = eigenvectors[1,0]*vx2[i] + eigenvectors[1,1]*vy2[i]
vx2[i] = vx_w
vy2[i] = vy_w
# compute the symmetry
highX1 = float('-Inf')
highY1 = float('-Inf')
highX2 = float('-Inf')
highY2 = float('-Inf')
for i in range(len(self.vx)):
if int(round(vx1[i])) > highX1:
highX1 = int(round(vx1[i]))
if int(round(vy1[i])) > highY1:
highY1 = int(round(vy1[i]))
if int(round(vx2[i])) > highX2:
highX2 = int(round(vx2[i]))
if int(round(vy2[i])) > highY2:
highY2 = int(round(vy2[i]))
"""
TODO: original program was +3... this and the 500 columns look like
hard constraints over the image size
"""
highX1 += 3
highY1 += 3
highX2 += 3
highY2 += 3
        # create temporary matrices to compute erosion, dilation and the symmetry ratio
ima3a = np.zeros((highX1, highY1))
ima3b = np.zeros((highX2, highY2))
try:
assert (np.max(self.vx) < highX1)
except AssertionError:
print('Constraint for max(vx) < highX1 does not hold!')
print(np.max(self.vx), highX1)
try:
assert (np.max(self.vx) < highX2)
except AssertionError as e:
print('Constraint for max(vx) < highX2 does not hold!')
print(np.max(self.vx), highX2)
#TODO write a better bound for the images dimensions
ima2a = np.zeros((highX1*2, 500), dtype=np.int)
ima2b = np.zeros((highX2*2, 500), dtype=np.int)
ima4a = np.zeros((highX1*2, 500), dtype=np.int)
ima4b = np.zeros((highX2*2, 500), dtype=np.int)
for i in range(n):
ima2a[int(self.vx[i]), int(self.vy[i])] = 1
ima2b[int(self.vx[i]), int(self.vy[i])] = 1
ima3a[int(np.round(vx1[i])), int(np.round(vy1[i]))] = 1
ima3b[int(np.round(vx2[i])), int(np.round(vy2[i]))] = 1
ima3a = dilate(ima3a)
ima3a = erode(ima3a)
for i in range(highX1):
for j in range(highY1):
ima4a[i, j] = ima2a[i, j] + ima3a[i, j]
ima3b = dilate(ima3b)
ima3b = erode(ima3b)
for i in range(highX2):
for j in range(highY2):
ima4b[i, j] = ima2b[i, j] + ima3b[i, j]
# compute symmetry index for high principal component
sa_one = 0
sa_two = 0
for i in range(highX1):
for j in range(highY1):
if ima4a[i, j] == 1:
sa_one += 1
if ima4a[i, j] == 2:
sa_two += 1
self.sym_high_pc = float(sa_one) / sa_two
# compute symmetry index for less principal component
sa_one = 0
sa_two = 0
for i in range(highX2):
for j in range(highY2):
if ima4b[i, j] == 1:
sa_one += 1
if ima4b[i, j] == 2:
sa_two += 1
self.sym_less_pc = float(sa_one) / sa_two
def _round(self, x):
f = np.vectorize(int)
return f(np.round(x))
def content_read(self, f1, f2, n):
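        """
        Rasterises the object contour given by the coordinate vectors f1/f2,
        grows the region from its centroid to fill the interior, and
        accumulates the grey-level histogram used to compute the object
        entropy and object size features.
        """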
sm = 0.0
x_max = float('-Inf')
x_min = float('Inf')
y_max = float('-Inf')
y_min = float('Inf')
for i in range(n):
if f1[i] > x_max:
x_max = f1[i]
if f1[i] < x_min:
x_min = f1[i]
if f2[i] > y_max:
y_max = f2[i]
if f2[i] < y_min:
y_min = f2[i]
self.ima[int(f1[i]), int(f2[i])] = 1
self.img[int(f1[i]), int(f2[i])] = RED
cx = int(np.average(f1))
cy = int(np.average(f2))
# print(len(f1))
# print(len(f2))
# print('cx:', cx)
# print('cy:', cy)
# print('average:', np.average(self.img_gray))
self.ima[cx][cy] = int(self.img_gray[cx, cy])
# print('centro', int(self.img_gray[cx, cy]))
sm += self.ima[cx][cy] * np.log(self.ima[cx][cy])
self.vx.append(cx)
self.vy.append(cy)
self.wEnt = np.zeros(256, dtype=np.float)
sw2 = 0
# print('x: ', x_min, x_max, "y:", y_min, y_max)
#
# print('size vx:', len(self.vx))
k = 0
while k < len(self.vx):
lx = self.vx[k]
ly = self.vy[k]
if lx > int(x_min)-1 and lx < int(x_max)+1 and ly>int(y_min)-1 and ly < int(y_max)+1:
self.contour_and_entropy(lx + 1, ly)
self.contour_and_entropy(lx - 1, ly)
self.contour_and_entropy(lx, ly + 1)
self.contour_and_entropy(lx, ly - 1)
else:
sw2 = 1
k += 1
if sw2 == 0:
sm = 0.0
for i in range(256):
self.wEnt[i] = self.wEnt[i] / float(len(self.vx))
if self.wEnt[i] > 0:
sm = sm + self.wEnt[i] * np.log(self.wEnt[i])
self.obj_entropy = sm*sm/2.0
self.obj_size = len(self.vx)
else:
self.obj_entropy = 0.0
self.obj_size = 0.0
# print('entropy:', self.obj_entropy)
# print('size:', self.obj_size)
#
# print('height', self.height, 'width', self.width)
#
# print('pixel', self.img[65, 135]) # [240 254 243]
# print('gray: ', self.img_gray[65, 135]) # 249 here, 250 c++
# print('pixel', self.img[65, 136])
# print('gray: ', self.img_gray[65, 136])
#
#
# for i in range(self.height):
# print('aaa')
# for j in range(self.width):
# print(i, j, self.ima[i][j], end=', ')
#
# print(self.ima.shape)
def contour_and_entropy(self, i, j):
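        """Region-growing step: marks pixel (i, j) with its grey level and updates the grey-level histogram."""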
if self.ima[i, j] == 0:
self.vx.append(i)
self.vy.append(j)
self.ima[i, j] = self.img_gray[i, j]
self.wEnt[self.ima[i, j]] = self.wEnt[self.ima[i, j]] + 1
def generate_similarity_classifier_fowl():
kl = []
for i in range(1, 8):
filename = 'kl9596_%d.txt' % (i)
kl.append(read_csv(basedir, filename))
ml_w = read_csv(basedir, 'ml9596.txt')
acerto_medio = [25.637479,
26.916101,
25.665415,
27.480373,
25.245048,
25.213264,
25.585858]
pw = np.repeat(0.14285, 7)
return ClassificaGauss(kl, ml_w, acerto_medio, pw, fowl_species)
def generate_similarity_classifier_rabbit():
kl = []
for i in range(1, 12):
filename = 'klrabbit_%d.txt' % (i)
kl.append(read_csv(basedir, filename))
ml_w = read_csv(basedir, 'mlrabbit.txt')
acerto_medio = [19.302075,
27.880435,
22.425938,
21.380911,
23.390403,
22.006214,
17.269468,
20.519256,
22.786217,
19.94028,
21.71183]
pw = np.repeat(0.090909091, 11)
return ClassificaGauss(kl, ml_w, acerto_medio, pw, rabbit_species)
class ClassificaGauss(object):
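    """
    Gaussian (quadratic discriminant) classifier: scores a 13-dimensional
    feature vector against per-species covariance (kl) and mean (ml_w)
    matrices and reports the species ranked by similarity.
    """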
def __init__(self, kl, ml_w, acerto_medio, pw, species):
self.kl = kl
self.ml_w = ml_w
self.acerto_medio = acerto_medio
self.pw = pw
self.species = species
def classify(self, x):
class_density_value = []
for i, kl_w in enumerate(self.kl):
class_density_value.append(self._find_class_density(x, kl_w, i + 1))
# for x in class_density_value:
# print('density:', x)
        taxa_acerto = np.zeros(len(self.species), dtype=np.float)
        for i in range(len(self.species)):
if class_density_value[i] > 0.0:
taxa_acerto[i] = class_density_value[i] * 100. / self.acerto_medio[i]
classification = dict()
for i in reversed(np.argsort(taxa_acerto)):
if taxa_acerto[i] > 0.0:
classification[self.species[i]] = taxa_acerto[i]
return classification
def _find_class_density(self, x, kl_w, w_especie):
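        """
        Quadratic discriminant score for one species:
        gx = -0.5 * (x - mu)' * inv(K) * (x - mu) - 0.5 * ln|K| + ln P(w),
        where K and mu are the species covariance and mean and P(w) its prior.
        """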
gx = .0
if not math.isclose(np.linalg.det(kl_w), 0): # det(kl_w) != 0.0
mx = np.zeros((1, 13), dtype=np.float)
mxt = np.zeros((13, 1), dtype=np.float)
for i in range(13):
mx[0, i] = x[i] - self.ml_w[w_especie-1, i]
# print('x_i:', x[i])
# print('subtraendo:', self.ml_w[w_especie-1, i])
# print('mx:', mx[0, i])
mxt[i, 0] = x[i] - self.ml_w[w_especie-1, i]
mx_inv = np.dot(mx, np.linalg.inv(kl_w))
mx_inv_mx = np.dot(mx_inv, mxt)
# print('mx shape', mx.shape)
# print('inv shape', np.linalg.inv(kl_w).shape)
# print('mx_inv', mx_inv.shape)
#
# print('x', x)
# print('mx', mx)
aa = mx_inv_mx[0, 0]
# print('aa:', aa)
bb = np.linalg.det(kl_w)
# print('det:', bb)
cc = np.log(bb)
# cc = round(cc, 4)
# print('log:', cc)
# print ('aa:', aa, ' bb:', bb, ' cc:', cc)
gx = (-0.5) * aa - (0.5 * cc)
if not math.isclose(self.pw[w_especie-1], 0.0):
gx = gx + np.log(self.pw[w_especie-1])
# print('gx: ', gx)
return gx
def generate_probability_classifier_rabbit():
fq = []
for i in range(1, 14):
filename = 'freqRabbit_%d.txt' % (i)
fq.append(np.array(read_csv(basedir, filename), dtype=np.float64))
per_w = read_csv(basedir, 'PerRabbit.txt')
vpriori = np.repeat(0.090909091, 11)
return ClassificaProb(fq, per_w, vpriori, rabbit_species)
def generate_probability_classifier_fowl():
fq = []
for i in range(1, 14):
filename = 'freqFowl_%d.txt' % (i)
fq.append(np.array(read_csv(basedir, filename), dtype=np.float64))
per_w = read_csv(basedir, 'PerFowl.txt')
vpriori = np.repeat(0.14285, 7)
return ClassificaProb(fq, per_w, vpriori, fowl_species)
class ClassificaProb:
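    """
    Probability-based classifier: chains the per-feature frequency/percentile
    matrices, updating the prior after each feature, and returns the posterior
    probability of each species.
    """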
def __init__(self, fq, per_w, vpriori, species):
self.fq = fq
self.per_w = per_w
self.vpriori = vpriori
self.species = species
self.nclass = len(species)
def classify(self, x):
self._find_posteriori(x, self.fq[0], self.fq[0], 0)
for i in range(1, 13):
self._find_posteriori(x, self.fq[i - 1], self.fq[i], i)
"""
The last frequency matrix stores the final classification results;
detection is done locating the percetil where the last feature is.
Then, the column of the percentil elected is the posterior probability
classification.
"""
wflag = False
taxa_acerto = np.zeros(self.nclass, dtype=np.float)
for wcont in range(self.nclass):
wper = self.per_w[12, wcont]
if not wflag and x[12] <= wper:
for i in range(self.nclass):
taxa_acerto[i] = self.fq[-1][i, wcont] * 100
wflag = True
if not wflag:
"""
If the element is greater than higher value, it is considered
in last percentil
"""
for i in range(self.nclass):
taxa_acerto[i] = self.fq[-1][i, -1] * 100
classification = dict()
for i in reversed(np.argsort(taxa_acerto)):
if taxa_acerto[i] > 1e-4:
classification[self.species[i]] = taxa_acerto[i]
return classification
def _find_posteriori(self, x, fq0, fq2, w_feature):
"""
Computes the posterior probability of the frequency matrix; this approach
is based on the Dirichlet density (frequency and percentiles matrices).
:param x: features vector
:param fq0: previous frequency matrix
:param fq2: current frequency matrix
:param w_feature:
"""
wsum = 0.0
aa = 0.0
wper = 0.0
        # TODO: these loops could probably be simplified
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
aa = fq2[i, j]
aa = aa * (2.0 / self.nclass)
fq2[i, j] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[i, j]
if wsum > 0.0:
aa = aa / wsum
fq2[i, j] = aa
if w_feature == 0:
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
aa = fq2[j, i]
aa = aa * self.vpriori[j]
fq2[j, i] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[j, i]
if wsum > 0.0:
aa = aa / wsum
fq2[j, i] = aa
else:
wflag = False
for wcont in range(self.nclass):
"""
if the number of features is greater than 0,
the correct percentil was found in the previous matrix
and the column-percentil will be the priori probability
"""
wper = self.per_w[w_feature-1, wcont]
if not wflag and x[w_feature-1] <= wper:
for i in range(self.nclass):
self.vpriori[i] = fq0[i, wcont]
wflag = True
if not wflag:
"""
if the element is greater than the highest value, it is
connsidered in last percentil
"""
for i in range(self.nclass):
self.vpriori[i] = fq0[i, self.nclass-1]
for i in range(self.nclass):
wsum = 0.0
for j in range(self.nclass):
"""
frequency matrix is multiplied by the new priori
probability vector, computed from the previous matrix
"""
aa = fq2[j, i]
aa = aa * self.vpriori[j]
fq2[j, i] = aa
wsum += aa
for j in range(self.nclass):
aa = fq2[j, i]
if wsum > 0.0:
aa = aa / wsum
fq2[j, i] = aa
def read_csv(basedir, filename):
return np.array(pd.read_csv('%s/%s'%(basedir, filename), sep='\s+', header=None).as_matrix(), dtype=np.float64)
| gpl-3.0 | 6,436,709,685,596,016,000 | 30.843836 | 115 | 0.462789 | false |
xju2/hzzws | bsubs/submit_limit.py | 1 | 2128 | #!/usr/bin/env python
import os
import sys
import commands
workdir = os.getcwd()
submit = True
do_hist = False
do_scalar = True
exe = "/afs/cern.ch/user/x/xju/work/h4l/h4lcode/hzzws/bsubs/run_limit.sh"
cal_opt = "pvalue" # limit,pvalue
data_opt = "obs" #obs, exp
ws_name = "combWS"
mu_name = "xs"
#mG_low = 500
#mG_hi = 3500
mG_low = 200
mG_hi = 2000
mG_step = 10
#kappa_list = [0.00, 0.01, 0.06, 0.1]
kappa_list = [0.01]
n_mG = int((mG_hi - mG_low)/mG_step)
out_name = workdir
if do_hist:
input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_histfactory_EKHI_v6.root"
data_name = "combDatabinned"
out_name += "/histofactory/"
else:
#input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_2D_EKHI_200.root"
#input_ws = "/afs/cern.ch/user/x/xju/work/diphoton/limits_hist_floating/inputs/2015_Graviton_2D_EKHI_200_Mar23.root"
#out_name += "/functional_Mar23/"
input_ws = "/afs/cern.ch/user/x/xju/work/HWWStatisticsCode/workspaces/2015_Scalar_2D_v4.root"
data_name = "combData"
out_name += "/scalar_2d/"
goodjobs = []
badjobs = []
print out_name
for kappa in kappa_list:
for mG in range(mG_low, mG_hi+mG_step, mG_step):
if not do_scalar:
fix_vars = "mG:"+str(mG)+",GkM:"+str(kappa)
else:
width = mG*kappa
fix_vars = "mX:"+str(mG)+",wX:"+str(width)
run_cmd = exe+" "+input_ws+" "+ws_name+" "+mu_name+" "+\
data_name+" "+fix_vars+" "+cal_opt+" "+data_opt+" "+out_name
if not submit: print run_cmd
#-G u_zp -q 8nh for atlas sources
#-G ATLASWISC_GEN -q wisc for wisconsin sources
bsubs_cmd = "bsub -q wisc -R 'pool>4000' -C 0 -o" + \
workdir+ "/output "+ run_cmd
if submit:
status,output=commands.getstatusoutput(bsubs_cmd)
else:
continue
if status != 0:
print output
badjobs.append(0)
else:
goodjobs.append(1)
print "Good jobs: "+ str(len(goodjobs))+", "+str(len(badjobs))+" failed!"
| mit | -4,650,468,770,263,121,000 | 28.555556 | 121 | 0.598214 | false |
g2p/systems | lib/systems/plugins/apache2/a2mod.py | 1 | 1252 | # vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from systems.dsl import resource, transition
from systems.registry import get_registry
from systems.typesystem import AttrType, ResourceType, EResource
from systems.util.templates import build_and_render
class A2Mod(EResource):
"""
An apache2 module.
"""
def expand_into(self, rg):
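    """
    Expand into an aptitude dependency on apache2, an a2enmod/a2dismod command
    for this module, and an apache2 restart command.
    """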
name = self.id_attrs['name']
enabled = self.wanted_attrs['enabled']
apache2 = rg.add_to_top(resource('AptitudePackage',
name='apache2.2-common',
))
cmd = '/usr/sbin/a2%smod' % { True: 'en', False: 'dis', }[enabled]
endis = rg.add_transition(transition('Command',
cmdline=[cmd, name, ]
),
depends=[apache2])
    # We don't need to restart every time, which takes some time.
reload = rg.add_transition(transition('Command',
cmdline=['/usr/sbin/invoke-rc.d', 'apache2', 'restart', ],
),
depends=[endis],
)
def register():
restype = ResourceType('A2Mod', A2Mod,
id_type={
'name': AttrType(
pytype=str),
},
state_type={
'enabled': AttrType(
default_value=True,
pytype=bool),
})
get_registry().resource_types.register(restype)
| gpl-2.0 | -8,509,602,508,099,704,000 | 25.638298 | 70 | 0.626198 | false |
amaurywalbert/twitter | n6/n6_alters_timeline_collect_v5.0_50egos.py | 2 | 14123 | # -*- coding: latin1 -*-
################################################################################################
#
#
import tweepy, datetime, sys, time, json, os, os.path, shutil, time, struct, random
import multi_oauth_n6
# Script that contains the keys for Twitter authentication
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 5.0 - Collect the timeline of the alters (followers of the egos' friends) - Alters' Timeline - identify the set of retweets in the timeline.
##
##
## 5.1 - Use of Tweepy to handle the authentications...
##
##
## SAVE ONLY WHAT IS NEEDED TO SAVE DISK SPACE. Collecting full tweets takes up a lot of space.
##
## NOTE> Twitter blocked several accounts on suspicion of spam... be extra careful with the scripts created.
##
## STATUS - Collecting - OK - Save BINARY files!! containing the retweet id and the author id, taken from the egos' list of alters.
##
## STATUS - Redo the collection until there is no "Rate Limit Exceeded" message left - each such message means one user was left uncollected
##
##
######################################################################################################################################################################
######################################################################################################################################################################
#
# Performs application authentication.
#
######################################################################################################################################################################
def autentication(auths):
global key
key += 1
if (key >= key_limit):
key = key_init
print
print("######################################################################")
print ("Autenticando usando chave número: "+str(key)+"/"+str(key_limit))
print("######################################################################\n")
time.sleep(wait)
api_key = tweepy.API(auths[key])
return (api_key)
######################################################################################################################################################################
#
# Converte formato data para armazenar em formato JSON
#
######################################################################################################################################################################
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
encoded_object = list(obj.timetuple())[0:6]
else:
encoded_object =json.JSONEncoder.default(self, obj)
return encoded_object
################################################################################################
# Reads the binary files containing the retweet and author ids
################################################################################################
def read_arq_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
retweets_list = []
while f.tell() < tamanho:
buffer = f.read(timeline_struct.size)
retweet, user = timeline_struct.unpack(buffer)
status = {'retweet':retweet, 'user':user}
retweets_list.append(status)
return retweets_list
################################################################################################
# Reads the binary files containing the follower ids
################################################################################################
def read_arq_followers_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
followers_file = []
while f.tell() < tamanho:
buffer = f.read(followers_struct.size)
follower = followers_struct.unpack(buffer)
followers_file.append(follower[0])
return followers_file
######################################################################################################################################################################
#
# Tweepy - Performs the search and returns the timeline of a specific user
#
######################################################################################################################################################################
def get_timeline(user): # Timeline collection
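    """
    Collects up to the last 3200 tweets of `user` through a Tweepy cursor,
    rotating keys on rate-limit errors and logging other API errors to the
    error file (empty files are created for missing/unauthorized users).
    """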
global key
global dictionary
global api
global i
timeline = []
try:
        for page in tweepy.Cursor(api.user_timeline,id=user, count=200).pages(16): # Returns the last 3200 tweets (16*200)
for tweet in page:
timeline.append(tweet)
return (timeline)
except tweepy.error.RateLimitError as e:
print("Limite de acesso à API excedido. User: "+str(user)+" - Autenticando novamente... "+str(e))
api = autentication(auths)
except tweepy.error.TweepError as e:
        agora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M') # Gets the current time in the form YearMonthDayHourMinute
error = {}
        with open(error_dir+"timeline_collect.err", "a+") as outfile: # Opens the file for appending at the end
if e.message:
error = {'user':user,'reason': e.message,'date':agora, 'key':key}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
else:
error = {'user':user,'reason': str(e),'date':agora, 'key':key}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
try:
if e.message[0]['code'] == 32 or e.message[0]['code'] == 215:
key = random.randint(key_init,key_limit)
api = autentication(auths)
            if e.message[0]['code'] == 34: # Non-existent users
                dictionary[user] = user # Adds the collected user to the in-memory table
                with open(data_dir+str(user)+".json", "w") as f: # Creates an empty file
print ("Usuário inexistente. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
except Exception as e2:
print ("E2: "+str(e2))
try:
            if e.message == 'Not authorized.': # Unauthorized users
                dictionary[user] = user # Adds the collected user to the in-memory table
                with open(data_dir+str(user)+".json", "w") as f: # Creates an empty file
print ("Usuário não autorizada. User: "+str(user)+" - Arquivo criado com sucesso!")
i +=1
except Exception as e3:
print ("E3: "+str(e3))
######################################################################################################################################################################
#
# Gets the users' timelines
#
######################################################################################################################################################################
def save_timeline(j,k,l,user):
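    """
    Fetches the timeline of `user` and writes one (retweet id, author id)
    record per retweet to a binary file named after the user.
    """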
    global i # number of users with files already collected / number of files in the directory
    # Dictionary - hash table containing the users already collected
    global dictionary
    # Calls the function and receives the user's list of tweets as the return value
    t = 0 # Number of tweets for this user
timeline = get_timeline(user)
if timeline:
try:
with open(data_dir+str(user)+".dat", "w+b") as f:
for status in timeline:
if hasattr(status, 'retweeted_status'):
t+=1
                        f.write(timeline_struct.pack(status.retweeted_status.id, status.retweeted_status.user.id)) # Writes the retweet id and the author id to the user's binary file
###
            # retweets_list = read_arq_bin(data_dir+str(user)+".dat") # Function to convert the binary back into json-format dicts.
# print retweets_list
####
            dictionary[user] = user # Adds the collected user to the in-memory table
i +=1
print ("Egos_Friend nº: "+str(j)+" - Alter("+str(k)+"/"+str(l)+"): "+str(user)+" coletado com sucesso. "+str(t)+" retweets. Total de usuários coletados: "+str(i))
except Exception as e:
            agora = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M') # Gets the current time in the form YearMonthDayHourMinute
            with open(error_dir+"timeline_collect.err", "a+") as outfile: # Opens the file for appending at the end
if e.message:
error = {'user':user,'reason': e.message,'date':agora}
else:
error = {'user':user,'reason': str(e),'date':agora}
outfile.write(json.dumps(error, cls=DateTimeEncoder, separators=(',', ':'))+"\n")
print error
if os.path.exists(data_dir+str(user)+".dat"):
os.remove(data_dir+str(user)+".dat")
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Runs the collection of the timelines for the users specified in the files.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
    global i # number of users with files already collected / number of files in the directory
    j = 0 # Ordinal number of the friend currently being used for the timeline collection
    k = 0 # Ordinal number of the alter (follower) currently being used for the timeline collection
    for file in os.listdir(followers_collected_dir): # Scans the directory.
        j+=1
        followers_list = read_arq_followers_bin(followers_collected_dir+file) # List of alters (followers) of a given ego's friend
        l = len(followers_list) # Number of followers of the ego's friend
for follower in followers_list:
k+=1
if not dictionary.has_key(follower):
                save_timeline(j,k,l,follower) # Starts the collection function
print
print("######################################################################")
print("Coleta finalizada!")
print("######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
################################### SET WHETHER THIS IS A TEST OR NOT!!! ### ['auths_ok'] OR ['auths_test'] ################
oauth_keys = multi_oauth_n6.keys()
auths = oauth_keys['auths_ok']
################################### CONFIGURE THE LINES BELOW ####################################################
######################################################################################################################
key_init = 0 #################################################### These two lines assign the keys used by each script
key_limit = len(auths) #################################################### Uses all keys (size of the key list)
key = random.randint(key_init,key_limit) ###################################### Starts the script from a random key of the key set
followers_collected_dir = "/home/amaury/coleta/n5/alters_followers/50/bin/"#### Directory with the already collected sets of ego friends. Each file holds the set of followers of one friend.
data_dir = "/home/amaury/coleta/n6/timeline_collect/alters/bin/" ############## Directory for storing the output files
error_dir = "/home/amaury/coleta/n6/timeline_collect/alters/error/" ########### Directory for storing the error files
formato = 'll' ####################################################### struct format: two longs (retweet id and author id)
timeline_struct = struct.Struct(formato) ###################################### Initializes the struct object used to write that record layout to the binary file
formato_followers = 'l' ############################################## struct format: one long (follower id)
followers_struct = struct.Struct(formato_followers) ########################### Initializes the struct object used to read the followers binary files
wait = 60
dictionary = {} #################################################### {key:value} table to speed up lookups of already collected users
######################################################################################################################
######################################################################################################################
######################################################################################################################
# Creates the directories for storing the files
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(error_dir):
os.makedirs(error_dir)
###### Initializing the dictionary - hash table built from the files already created.
print
print("######################################################################")
print ("Criando tabela hash...")
i = 0 # Counts how many users have already been collected (all files in the directory)
for file in os.listdir(data_dir):
user_id = file.split(".dat")
user_id = long(user_id[0])
dictionary[user_id] = user_id
i+=1
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
# Authentication
api = autentication(auths)
# Runs the main method
if __name__ == "__main__": main() | gpl-3.0 | 3,922,565,552,435,736,600 | 50.547794 | 198 | 0.461341 | false |
Kagee/youtube-dl | youtube_dl/extractor/teamcoco.py | 1 | 3246 | from __future__ import unicode_literals
import base64
import re
from .common import InfoExtractor
from ..utils import qualities
class TeamcocoIE(InfoExtractor):
_VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
_TESTS = [
{
'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
'md5': '3f7746aa0dc86de18df7539903d399ea',
'info_dict': {
'id': '80187',
'ext': 'mp4',
'title': 'Conan Becomes A Mary Kay Beauty Consultant',
'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
'age_limit': 0,
}
}, {
'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
'info_dict': {
'id': '19705',
'ext': 'mp4',
'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
'title': 'Louis C.K. Interview Pt. 1 11/3/11',
'age_limit': 0,
}
}
]
_VIDEO_ID_REGEXES = (
r'"eVar42"\s*:\s*(\d+)',
r'Ginger\.TeamCoco\.openInApp\("video",\s*"([^"]+)"',
r'"id_not"\s*:\s*(\d+)'
)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
video_id = mobj.group('video_id')
if not video_id:
video_id = self._html_search_regex(
self._VIDEO_ID_REGEXES, webpage, 'video id')
embed_url = 'http://teamcoco.com/embed/v/%s' % video_id
embed = self._download_webpage(
embed_url, video_id, 'Downloading embed page')
encoded_data = self._search_regex(
r'"preload"\s*:\s*"([^"]+)"', embed, 'encoded data')
data = self._parse_json(
base64.b64decode(encoded_data.encode('ascii')).decode('utf-8'), video_id)
formats = []
get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
for filed in data['files']:
m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
if m_format is not None:
format_id = m_format.group(1)
else:
format_id = filed['bitrate']
tbr = (
int(filed['bitrate'])
if filed['bitrate'].isdigit()
else None)
formats.append({
'url': filed['url'],
'ext': 'mp4',
'tbr': tbr,
'format_id': format_id,
'quality': get_quality(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': data['title'],
'thumbnail': data.get('thumb', {}).get('href'),
'description': data.get('teaser'),
'age_limit': self._family_friendly_search(webpage),
}
| unlicense | -4,121,415,404,715,677,700 | 34.282609 | 156 | 0.496303 | false |
ProjectBabbler/ebird-api | tests/mixins/location_tests.py | 1 | 1403 | from tests.mixins.base import BaseMixin
class LocationTestsMixin(BaseMixin):
def test_location_is_comma_separated_string(self):
query = self.api_call(locations="L001,L002")[1]
        self.assertEqual(query["r"], "L001,L002")
def test_location_string_whitespace_is_removed(self):
query = self.api_call(locations=" L001 , L002 ")[1]
        self.assertEqual(query["r"], "L001,L002")
def test_location_is_list(self):
query = self.api_call(locations=["L001", "L002"])[1]
        self.assertEqual(query["r"], "L001,L002")
def test_location_list_whitespace_is_removed(self):
query = self.api_call(locations=[" L001 ", " L002 "])[1]
        self.assertEqual(query["r"], "L001,L002")
def test_invalid_location_raises_error(self):
self.api_raises(ValueError, locations="L")
def test_blank_location_raises_error(self):
self.api_raises(ValueError, locations="")
def test_blank_location_in_string_raises_error(self):
self.api_raises(ValueError, locations="L001,")
def test_blank_location_in_list_raises_error(self):
self.api_raises(ValueError, locations=["L001", ""])
def test_more_than_10_in_string_raises_error(self):
self.api_raises(ValueError, locations=",".join(["L001"] * 11))
def test_more_than_10_in_list_raises_error(self):
self.api_raises(ValueError, locations=["L001"] * 11)
| mit | 957,352,645,143,008,800 | 36.918919 | 70 | 0.653599 | false |
OWASP/django-DefectDojo | tests/Finding_unit_test.py | 1 | 16399 | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import unittest
import re
import sys
import os
# importing Product_unit_test as a module
# set relative path
dir_path = os.path.dirname(os.path.realpath(__file__))
try: # First Try for python 3
import importlib.util
product_unit_test_module = importlib.util.spec_from_file_location("Product_unit_test",
os.path.join(dir_path, 'Product_unit_test.py')) # using ',' allows python to determine the type of separator to use.
product_unit_test = importlib.util.module_from_spec(product_unit_test_module)
product_unit_test_module.loader.exec_module(product_unit_test)
except: # This will work for python2 if above fails
import imp
product_unit_test = imp.load_source('Product_unit_test',
os.path.join(dir_path, 'Product_unit_test.py'))
class FindingTest(unittest.TestCase):
def setUp(self):
# Initialize the driver
        # When used with Travis, chromedriver is stored in the same
        # directory as the unit tests
self.driver = webdriver.Chrome('chromedriver')
# Allow a little time for the driver to initialize
self.driver.implicitly_wait(30)
# Set the base address of the dojo
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def login_page(self):
# Make a member reference to the driver
driver = self.driver
# Navigate to the login page
driver.get(self.base_url + "login")
# Good practice to clear the entry before typing
driver.find_element_by_id("id_username").clear()
# These credentials will be used by Travis when testing new PRs
# They will not work when testing on your own build
# Be sure to change them before submitting a PR
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
# "Click" the but the login button
driver.find_element_by_css_selector("button.btn.btn-success").click()
return driver
def test_edit_finding(self):
# The Name of the Finding created by test_add_product_finding => 'App Vulnerable to XSS'
# Test To Add Finding To product
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
# Click on `Edit Finding`
driver.find_element_by_link_text("Edit Finding").click()
# Change: 'Severity' and 'Mitigation'
# finding Severity
Select(driver.find_element_by_id("id_severity")).select_by_visible_text("Critical")
# finding Description
driver.find_element_by_id("id_severity").send_keys(Keys.TAB, "This is a crucial update to finding description.")
# "Click" the Done button to Edit the finding
driver.find_element_by_xpath("//input[@name='_Finished']").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding saved successfully', productTxt))
def test_add_image(self):
# The Name of the Finding created by test_add_product_finding => 'App Vulnerable to XSS'
# Test To Add Finding To product
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
        # Click on `Manage Images`
driver.find_element_by_link_text("Manage Images").click()
# select first file input field: form-0-image
# Set full image path for image file 'strange.png
image_path = os.path.join(dir_path, 'finding_image.png')
driver.find_element_by_id("id_form-0-image").send_keys(image_path)
# Save uploaded image
driver.find_element_by_css_selector("button.btn.btn-success").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Images updated successfully', productTxt))
def test_mark_finding_for_review(self):
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
        # Click on `Request Peer Review`
driver.find_element_by_link_text("Request Peer Review").click()
# select Reviewer
# Let's make the first user in the list a reviewer
# set select element style from 'none' to 'inline'
driver.execute_script("document.getElementsByName('reviewers')[0].style.display = 'inline'")
# select the first option tag
element = driver.find_element_by_xpath("//select[@name='reviewers']")
reviewer_option = element.find_elements_by_tag_name('option')[0]
Select(element).select_by_value(reviewer_option.get_attribute("value"))
# Add Review notes
driver.find_element_by_id("id_entry").clear()
driver.find_element_by_id("id_entry").send_keys("This is to be reveiwed critically. Make sure it is well handled.")
        # Click 'Mark for review'
driver.find_element_by_name("submit").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding marked for review and reviewers notified.', productTxt))
def test_clear_review_from_finding(self):
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on `Clear Review` link text
driver.find_element_by_link_text("Clear Review").click()
# Mark Active and Verified checkboxes
driver.find_element_by_id('id_active').click()
driver.find_element_by_id('id_verified').click()
# Add Review notes
driver.find_element_by_id("id_entry").clear()
driver.find_element_by_id("id_entry").send_keys("This has been reviewed and confirmed. A fix needed here.")
        # Click 'Clear review' button
driver.find_element_by_name("submit").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding review has been updated successfully.', productTxt))
def test_delete_image(self):
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
        # Click on `Manage Images`
driver.find_element_by_link_text("Manage Images").click()
# mark delete checkbox for first file input field: form-0-DELETE
driver.find_element_by_id("id_form-0-DELETE").click()
# Save selection(s) for image deletion
driver.find_element_by_css_selector("button.btn.btn-success").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Images updated successfully', productTxt))
def test_close_finding(self):
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
# Click on `Close Finding`
driver.find_element_by_link_text("Close Finding").click()
# fill notes stating why finding should be closed
driver.find_element_by_id("id_entry").send_keys("All issues in this Finding have been resolved successfully")
# click 'close Finding' submission button
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding closed.', productTxt))
def test_make_finding_a_template(self):
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
# Click on `Make Finding a Template`
driver.find_element_by_link_text("Make Finding a Template").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding template added successfully. You may edit it here.', productTxt) or
re.search(r'A finding template with that title already exists.', productTxt))
def test_apply_template_to_a_finding(self):
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
# Click on `Apply Template to Finding`
driver.find_element_by_link_text("Apply Template to Finding").click()
# click on the template of 'App Vulnerable to XSS'
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on 'Replace all' button
driver.find_element_by_xpath("//button[@data-option='Replace']").click()
# Click the 'finished' button to submit
driver.find_element_by_name('_Finished').click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'App Vulnerable to XSS', productTxt))
def test_delete_finding_template(self):
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "template")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on `Delete Template` button
driver.find_element_by_name("delete_template").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding Template deleted successfully.', productTxt))
def test_import_scan_result(self):
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'Finding' dropdown menubar
driver.find_element_by_partial_link_text("Findings").click()
# Click on `Import Scan Results` link text
driver.find_element_by_link_text("Import Scan Results").click()
# Select `ZAP Scan' as Scan Type
Select(driver.find_element_by_id("id_scan_type")).select_by_visible_text('ZAP Scan')
# upload scan file
file_path = os.path.join(dir_path, 'zap_sample.xml')
driver.find_element_by_name("file").send_keys(file_path)
# Click Submit button
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'ZAP Scan processed, a total of 4 findings were processed', productTxt))
def test_delete_finding(self):
# The Name of the Finding created by test_add_product_finding => 'App Vulnerable to XSS'
# Test To Add Finding To product
# login to site, password set to fetch from environ
driver = self.login_page()
# Navigate to All Finding page
driver.get(self.base_url + "finding")
# Select and click on the particular finding to edit
driver.find_element_by_link_text("App Vulnerable to XSS").click()
# Click on the 'dropdownMenu1 button'
driver.find_element_by_id("dropdownMenu1").click()
# Click on `Delete Finding`
driver.find_element_by_link_text("Delete Finding").click()
# Click 'Yes' on Alert popup
driver.switch_to.alert.accept()
# Query the site to determine if the finding has been added
productTxt = driver.find_element_by_tag_name("BODY").text
        # Assert on the query to determine status of failure
self.assertTrue(re.search(r'Finding deleted successfully', productTxt))
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
def suite():
suite = unittest.TestSuite()
    # Add each test to the suite to be run
# success and failure is output by the test
suite.addTest(product_unit_test.ProductTest('test_create_product'))
suite.addTest(product_unit_test.ProductTest('test_add_product_finding'))
suite.addTest(FindingTest('test_edit_finding'))
suite.addTest(FindingTest('test_add_image'))
suite.addTest(FindingTest('test_mark_finding_for_review'))
suite.addTest(FindingTest('test_clear_review_from_finding'))
suite.addTest(FindingTest('test_close_finding'))
suite.addTest(FindingTest('test_make_finding_a_template'))
suite.addTest(FindingTest('test_apply_template_to_a_finding'))
suite.addTest(FindingTest('test_import_scan_result'))
suite.addTest(FindingTest('test_delete_image'))
suite.addTest(FindingTest('test_delete_finding'))
suite.addTest(FindingTest('test_delete_finding_template'))
suite.addTest(product_unit_test.ProductTest('test_delete_product'))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True)
ret = not runner.run(suite()).wasSuccessful()
sys.exit(ret)
| bsd-3-clause | 4,445,232,039,997,329,000 | 51.226115 | 125 | 0.667053 | false |
qpython-android/QPython3-core | pybuild/packages/pandas.py | 1 | 1975 | from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
class Pandas(Package):
source = GitSource('https://github.com/AIPYX/pandas.git', alias='pandas', branch='qpyc/0.23.4')
patches = [
#LocalPatch('0001-cross-compile'),
#LocalPatch('0001-add-ftello64'),
]
#use_gcc = True
def prepare(self):
#self.run(['cp', self.filesdir / 'site.cfg', './'])
pass
def build(self):
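        """
        Cythonizes the sources, then builds the extension modules against the
        cross-compiled Python, OpenBLAS and NDK libraries, and finally runs
        build_py.
        """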
#self.system("find . -iname '*.pyx' -exec cython {} \;")
PY_BRANCH = os.getenv('PY_BRANCH')
PY_M_BRANCH = os.getenv('PY_M_BRANCH')
self.run([
'python',
'setup.py',
'cython',
])
self.run([
'python',
'setup.py',
'build_ext',
f'-I../../build/target/python/usr/include/python{PY_BRANCH}.{PY_M_BRANCH}'\
f':../../build/target/openblas/usr/include'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/include'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/include',
f'-L../../build/target/python/usr/lib'\
f':../../build/target/openblas/usr/lib:{self.env["ANDROID_NDK"]}/toolchains/renderscript/prebuilt/linux-x86_64/platform/arm'\
f':{self.env["ANDROID_NDK"]}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x/armv7-a'\
f':{self.env["ANDROID_NDK"]}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a'\
f':{self.env["ANDROID_NDK"]}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/arm-linux-androideabi/lib/armv7-a',
f'-lpython{PY_BRANCH}.{PY_M_BRANCH},m,gnustl_static,atomic'
])
self.run([
'python',
'setup.py',
'build_py',
])
def refresh(self):
return True
| apache-2.0 | 1,866,403,884,767,679,500 | 35.574074 | 145 | 0.56557 | false |
cylc/cylc | cylc/flow/cycling/iso8601.py | 1 | 34605 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Date-time cycling by point, interval, and sequence classes."""
from functools import lru_cache
import re
from metomi.isodatetime.data import Calendar, Duration, CALENDAR
from metomi.isodatetime.dumpers import TimePointDumper
from metomi.isodatetime.timezone import (
get_local_time_zone, get_local_time_zone_format, TimeZoneFormatMode)
from metomi.isodatetime.exceptions import IsodatetimeError
from cylc.flow.time_parser import CylcTimeParser
from cylc.flow.cycling import (
PointBase, IntervalBase, SequenceBase, ExclusionBase, cmp_to_rich, cmp
)
from cylc.flow.exceptions import (
CylcConfigError,
IntervalParsingError,
PointParsingError,
SequenceDegenerateError
)
from cylc.flow.wallclock import get_current_time_string
from cylc.flow.parsec.validate import IllegalValueError
CYCLER_TYPE_ISO8601 = "iso8601"
CYCLER_TYPE_SORT_KEY_ISO8601 = "b"
DATE_TIME_FORMAT = "CCYYMMDDThhmm"
EXPANDED_DATE_TIME_FORMAT = "+XCCYYMMDDThhmm"
NEW_DATE_TIME_REC = re.compile("T")
WARNING_PARSE_EXPANDED_YEAR_DIGITS = (
"(incompatible with [cylc]cycle point num expanded year digits = %s ?)")
class SuiteSpecifics:
"""Store suite-setup-specific constants and utilities here."""
ASSUMED_TIME_ZONE = None
DUMP_FORMAT = None
NUM_EXPANDED_YEAR_DIGITS = None
abbrev_util = None
interval_parser = None
point_parser = None
recurrence_parser = None
iso8601_parsers = None
class ISO8601Point(PointBase):
"""A single point in an ISO8601 date time sequence."""
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
__slots__ = ('value')
@classmethod
def from_nonstandard_string(cls, point_string):
"""Standardise a date-time string."""
return ISO8601Point(str(point_parse(point_string))).standardise()
def add(self, other):
"""Add an Interval to self."""
return ISO8601Point(self._iso_point_add(self.value, other.value))
def __cmp__(self, other):
# Compare other (point) to self.
if other is None:
return -1
if self.TYPE != other.TYPE:
return cmp(self.TYPE_SORT_KEY, other.TYPE_SORT_KEY)
if self.value == other.value:
return 0
return self._iso_point_cmp(self.value, other.value)
def standardise(self):
"""Reformat self.value into a standard representation."""
try:
self.value = str(point_parse(self.value))
except IsodatetimeError as exc:
if self.value.startswith("+") or self.value.startswith("-"):
message = WARNING_PARSE_EXPANDED_YEAR_DIGITS % (
SuiteSpecifics.NUM_EXPANDED_YEAR_DIGITS)
else:
message = str(exc)
raise PointParsingError(type(self), self.value, message)
return self
def sub(self, other):
"""Subtract a Point or Interval from self."""
if isinstance(other, ISO8601Point):
return ISO8601Interval(
self._iso_point_sub_point(self.value, other.value))
return ISO8601Point(
self._iso_point_sub_interval(self.value, other.value))
def __hash__(self):
return hash(self.value)
@staticmethod
@lru_cache(10000)
def _iso_point_add(point_string, interval_string):
"""Add the parsed point_string to the parsed interval_string."""
point = point_parse(point_string)
interval = interval_parse(interval_string)
return str(point + interval)
@staticmethod
@lru_cache(10000)
def _iso_point_cmp(point_string, other_point_string):
"""Compare the parsed point_string to the other one."""
point = point_parse(point_string)
other_point = point_parse(other_point_string)
return cmp(point, other_point)
@staticmethod
@lru_cache(10000)
def _iso_point_sub_interval(point_string, interval_string):
"""Return the parsed point_string minus the parsed interval_string."""
point = point_parse(point_string)
interval = interval_parse(interval_string)
return str(point - interval)
@staticmethod
@lru_cache(10000)
def _iso_point_sub_point(point_string, other_point_string):
"""Return the difference between the two parsed point strings."""
point = point_parse(point_string)
other_point = point_parse(other_point_string)
return str(point - other_point)
# TODO: replace __cmp__ infrastructure
cmp_to_rich(ISO8601Point)
class ISO8601Interval(IntervalBase):
"""The interval between points in an ISO8601 date time sequence."""
NULL_INTERVAL_STRING = "P0Y"
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
__slots__ = ('value')
@classmethod
def get_null(cls):
"""Return a null interval."""
return ISO8601Interval("P0Y")
@classmethod
def get_null_offset(cls):
"""Return a null offset."""
return ISO8601Interval("+P0Y")
def get_inferred_child(self, string):
"""Return an instance with 'string' amounts of my non-zero units."""
interval = interval_parse(self.value)
amount_per_unit = int(string)
unit_amounts = {}
for attribute in ["years", "months", "weeks", "days",
"hours", "minutes", "seconds"]:
if getattr(interval, attribute):
unit_amounts[attribute] = amount_per_unit
interval = Duration(**unit_amounts)
return ISO8601Interval(str(interval))
def standardise(self):
"""Format self.value into a standard representation."""
try:
self.value = str(interval_parse(self.value))
except IsodatetimeError:
raise IntervalParsingError(type(self), self.value)
return self
def add(self, other):
"""Add other to self (point or interval) c.f. ISO 8601."""
if isinstance(other, ISO8601Interval):
return ISO8601Interval(
self._iso_interval_add(self.value, other.value))
return other + self
def cmp_(self, other):
"""Compare another interval with this one."""
return self._iso_interval_cmp(self.value, other.value)
def sub(self, other):
"""Subtract another interval from this one."""
return ISO8601Interval(
self._iso_interval_sub(self.value, other.value))
def __abs__(self):
"""Return an interval with absolute values of this one's values."""
return ISO8601Interval(
self._iso_interval_abs(self.value, self.NULL_INTERVAL_STRING))
def __mul__(self, factor):
"""Return an interval with v * factor for v in this one's values."""
return ISO8601Interval(self._iso_interval_mul(self.value, factor))
def __bool__(self):
"""Return whether this interval has any non-null values."""
return self._iso_interval_nonzero(self.value)
@staticmethod
@lru_cache(10000)
def _iso_interval_abs(interval_string, other_interval_string):
"""Return the absolute (non-negative) value of an interval_string."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
if interval < other:
return str(interval * -1)
return interval_string
@staticmethod
@lru_cache(10000)
def _iso_interval_add(interval_string, other_interval_string):
"""Return one parsed interval_string plus the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return str(interval + other)
@staticmethod
@lru_cache(10000)
def _iso_interval_cmp(interval_string, other_interval_string):
"""Compare one parsed interval_string with the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return cmp(interval, other)
@staticmethod
@lru_cache(10000)
def _iso_interval_sub(interval_string, other_interval_string):
"""Subtract one parsed interval_string from the other one."""
interval = interval_parse(interval_string)
other = interval_parse(other_interval_string)
return str(interval - other)
@staticmethod
@lru_cache(10000)
def _iso_interval_mul(interval_string, factor):
"""Multiply one parsed interval_string's values by factor."""
interval = interval_parse(interval_string)
return str(interval * factor)
@staticmethod
@lru_cache(10000)
def _iso_interval_nonzero(interval_string):
"""Return whether the parsed interval_string is a null interval."""
interval = interval_parse(interval_string)
return bool(interval)
class ISO8601Exclusions(ExclusionBase):
"""A collection of ISO8601Sequences that represent excluded sequences.
The object is able to determine if points are within any of its
grouped exclusion sequences. The Python ``in`` and ``not in`` operators
may be used on this object to determine if a point is in the collection
of exclusion sequences."""
def __init__(self, excl_points, start_point, end_point=None):
super(ISO8601Exclusions, self).__init__(start_point, end_point)
self.build_exclusions(excl_points)
def build_exclusions(self, excl_points):
for point in excl_points:
try:
# Try making an ISO8601Sequence
exclusion = ISO8601Sequence(point, self.exclusion_start_point,
self.exclusion_end_point)
self.exclusion_sequences.append(exclusion)
except (AttributeError, TypeError, ValueError):
# Try making an ISO8601Point
exclusion_point = ISO8601Point.from_nonstandard_string(
str(point)) if point else None
if exclusion_point not in self.exclusion_points:
self.exclusion_points.append(exclusion_point)
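# --- Illustrative usage sketch (not part of the original module) ---
# The helper below only demonstrates the membership behaviour described in
# the ISO8601Exclusions docstring above.  It assumes init() (defined later
# in this module) has already been called to set up the SuiteSpecifics
# parsers; the cycle point strings are arbitrary example values.
def _example_exclusion_membership():
    """Sketch: membership tests against an ISO8601Exclusions collection."""
    start = ISO8601Point.from_nonstandard_string("20000101T0000Z")
    exclusions = ISO8601Exclusions(["20000105T0000Z"], start)
    excluded = ISO8601Point.from_nonstandard_string("20000105T0000Z")
    allowed = ISO8601Point.from_nonstandard_string("20000106T0000Z")
    # Both operators delegate to the grouped exclusion points/sequences.
    return (excluded in exclusions) and (allowed not in exclusions)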
class ISO8601Sequence(SequenceBase):
"""A sequence of ISO8601 date time points separated by an interval.
Note that an ISO8601Sequence object (may) contain
ISO8601ExclusionSequences"""
TYPE = CYCLER_TYPE_ISO8601
TYPE_SORT_KEY = CYCLER_TYPE_SORT_KEY_ISO8601
_MAX_CACHED_POINTS = 100
__slots__ = ('dep_section', 'context_start_point', 'context_end_point',
'offset', '_cached_first_point_values',
'_cached_next_point_values', '_cached_valid_point_booleans',
'_cached_recent_valid_points', 'spec', 'abbrev_util',
'recurrence', 'exclusions', 'step', 'value')
@classmethod
def get_async_expr(cls, start_point=None):
"""Express a one-off sequence at the initial cycle point."""
if start_point is None:
return "R1"
return "R1/" + str(start_point)
def __init__(self, dep_section, context_start_point=None,
context_end_point=None):
SequenceBase.__init__(
self, dep_section, context_start_point, context_end_point)
self.dep_section = dep_section
if context_start_point is None:
self.context_start_point = context_start_point
elif isinstance(context_start_point, ISO8601Point):
self.context_start_point = context_start_point
else:
self.context_start_point = ISO8601Point.from_nonstandard_string(
context_start_point)
if context_end_point is None:
self.context_end_point = None
elif isinstance(context_end_point, ISO8601Point):
self.context_end_point = context_end_point
else:
self.context_end_point = ISO8601Point.from_nonstandard_string(
context_end_point)
self.offset = ISO8601Interval.get_null()
self._cached_first_point_values = {}
self._cached_next_point_values = {}
self._cached_valid_point_booleans = {}
self._cached_recent_valid_points = []
self.spec = dep_section
self.abbrev_util = CylcTimeParser(self.context_start_point,
self.context_end_point,
SuiteSpecifics.iso8601_parsers)
# Parse_recurrence returns an isodatetime TimeRecurrence object
# and a list of exclusion strings.
self.recurrence, excl_points = self.abbrev_util.parse_recurrence(
dep_section)
# Determine the exclusion start point and end point
try:
exclusion_start_point = ISO8601Point.from_nonstandard_string(
str(self.recurrence.start_point))
except IsodatetimeError:
exclusion_start_point = self.context_start_point
try:
exclusion_end_point = ISO8601Point.from_nonstandard_string(
str(self.recurrence.end_point))
except IsodatetimeError:
exclusion_end_point = self.context_end_point
self.exclusions = []
# Creating an exclusions object instead
if excl_points:
try:
self.exclusions = ISO8601Exclusions(
excl_points,
exclusion_start_point,
exclusion_end_point)
except AttributeError:
pass
self.step = ISO8601Interval(str(self.recurrence.duration))
self.value = str(self.recurrence)
# Concatenate the strings in exclusion list
if self.exclusions:
self.value += '!' + str(self.exclusions)
def get_interval(self):
"""Return the interval between points in this sequence."""
return self.step
def get_offset(self):
"""Deprecated: return the offset used for this sequence."""
return self.offset
def set_offset(self, i_offset):
"""Deprecated: alter state to i_offset the entire sequence."""
if self.recurrence.start_point is not None:
self.recurrence.start_point += interval_parse(str(i_offset))
if self.recurrence.end_point is not None:
self.recurrence.end_point += interval_parse(str(i_offset))
self._cached_first_point_values = {}
self._cached_next_point_values = {}
self._cached_valid_point_booleans = {}
self._cached_recent_valid_points = []
        self.value = str(self.recurrence)
if self.exclusions:
self.value += '!' + str(self.exclusions)
@lru_cache(100)
def is_on_sequence(self, point):
"""Return True if point is on-sequence."""
# Iterate starting at recent valid points, for speed.
if self.exclusions and point in self.exclusions:
return False
for valid_point in reversed(self._cached_recent_valid_points):
if valid_point == point:
return True
if valid_point > point:
continue
next_point = valid_point
while next_point is not None and next_point < point:
next_point = self.get_next_point_on_sequence(next_point)
if next_point is None:
continue
if next_point == point:
return True
return self.recurrence.get_is_valid(point_parse(point.value))
def is_valid(self, point):
"""Return True if point is on-sequence and in-bounds."""
try:
return self._cached_valid_point_booleans[point.value]
except KeyError:
is_valid = self.is_on_sequence(point)
if (len(self._cached_valid_point_booleans) >
self._MAX_CACHED_POINTS):
self._cached_valid_point_booleans.popitem()
self._cached_valid_point_booleans[point.value] = is_valid
return is_valid
def get_prev_point(self, point):
"""Return the previous point < point, or None if out of bounds."""
# may be None if out of the recurrence bounds
res = None
prev_point = self.recurrence.get_prev(point_parse(point.value))
if prev_point:
res = ISO8601Point(str(prev_point))
if res == point:
raise SequenceDegenerateError(self.recurrence,
SuiteSpecifics.DUMP_FORMAT,
res, point)
# Check if res point is in the list of exclusions
# If so, check the previous point by recursion.
# Once you have found a point that is *not* in the exclusion
# list, you can return it.
if self.exclusions and res in self.exclusions:
return self.get_prev_point(res)
return res
def get_nearest_prev_point(self, point):
"""Return the largest point < some arbitrary point."""
if self.is_on_sequence(point):
return self.get_prev_point(point)
p_iso_point = point_parse(point.value)
prev_cycle_point = None
for recurrence_iso_point in self.recurrence:
# Is recurrence point greater than arbitrary point?
if recurrence_iso_point > p_iso_point:
break
recurrence_cycle_point = ISO8601Point(str(recurrence_iso_point))
if self.exclusions and recurrence_cycle_point in self.exclusions:
break
prev_cycle_point = recurrence_cycle_point
if prev_cycle_point is None:
return None
if prev_cycle_point == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
prev_cycle_point, point
)
# Check all exclusions
if self.exclusions and prev_cycle_point in self.exclusions:
return self.get_prev_point(prev_cycle_point)
return prev_cycle_point
def get_next_point(self, point):
"""Return the next point > p, or None if out of bounds."""
try:
return ISO8601Point(self._cached_next_point_values[point.value])
except KeyError:
pass
# Iterate starting at recent valid points, for speed.
for valid_point in reversed(self._cached_recent_valid_points):
if valid_point >= point:
continue
next_point = valid_point
excluded = False
while next_point is not None and (next_point <= point or excluded):
excluded = False
next_point = self.get_next_point_on_sequence(next_point)
if next_point and next_point in self.exclusions:
excluded = True
if next_point is not None:
self._check_and_cache_next_point(point, next_point)
return next_point
# Iterate starting at the beginning.
p_iso_point = point_parse(point.value)
for recurrence_iso_point in self.recurrence:
if recurrence_iso_point > p_iso_point:
next_point = ISO8601Point(str(recurrence_iso_point))
if next_point and next_point in self.exclusions:
continue
self._check_and_cache_next_point(point, next_point)
return next_point
return None
def _check_and_cache_next_point(self, point, next_point):
"""Verify and cache the get_next_point return info."""
# Verify next_point != point.
if next_point == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
next_point, point
)
# Cache the answer for point -> next_point.
if (len(self._cached_next_point_values) >
self._MAX_CACHED_POINTS):
self._cached_next_point_values.popitem()
self._cached_next_point_values[point.value] = next_point.value
# Cache next_point as a valid starting point for this recurrence.
        if (len(self._cached_recent_valid_points) >
self._MAX_CACHED_POINTS):
self._cached_recent_valid_points.pop(0)
self._cached_recent_valid_points.append(next_point)
def get_next_point_on_sequence(self, point):
"""Return the on-sequence point > point assuming that point is
on-sequence, or None if out of bounds."""
result = None
next_point = self.recurrence.get_next(point_parse(point.value))
if next_point:
result = ISO8601Point(str(next_point))
if result == point:
raise SequenceDegenerateError(
self.recurrence, SuiteSpecifics.DUMP_FORMAT,
point, result
)
# Check it is in the exclusions list now
if result and result in self.exclusions:
return self.get_next_point_on_sequence(result)
return result
def get_first_point(self, point):
"""Return the first point >= to point, or None if out of bounds."""
try:
return ISO8601Point(self._cached_first_point_values[point.value])
except KeyError:
pass
p_iso_point = point_parse(point.value)
for recurrence_iso_point in self.recurrence:
if recurrence_iso_point >= p_iso_point:
first_point_value = str(recurrence_iso_point)
ret = ISO8601Point(first_point_value)
# Check multiple exclusions
if ret and ret in self.exclusions:
return self.get_next_point_on_sequence(ret)
if (len(self._cached_first_point_values) >
self._MAX_CACHED_POINTS):
self._cached_first_point_values.popitem()
self._cached_first_point_values[point.value] = (
first_point_value)
return ret
return None
def get_start_point(self):
"""Return the first point in this sequence, or None."""
for recurrence_iso_point in self.recurrence:
point = ISO8601Point(str(recurrence_iso_point))
# Check for multiple exclusions
if not self.exclusions or point not in self.exclusions:
return point
return None
def get_stop_point(self):
"""Return the last point in this sequence, or None if unbounded."""
if (self.recurrence.repetitions is not None or (
(self.recurrence.start_point is not None or
self.recurrence.min_point is not None) and
(self.recurrence.end_point is not None or
self.recurrence.max_point is not None))):
curr = None
prev = None
for recurrence_iso_point in self.recurrence:
prev = curr
curr = recurrence_iso_point
ret = ISO8601Point(str(curr))
if self.exclusions and ret in self.exclusions:
return ISO8601Point(str(prev))
return ret
return None
def __eq__(self, other):
# Return True if other (sequence) is equal to self.
if self.TYPE != other.TYPE:
return False
if self.value == other.value:
return True
return False
def __lt__(self, other):
return self.value < other.value
def __str__(self):
return self.value
def __hash__(self):
return hash(self.value)
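# --- Illustrative usage sketch (not part of the original module) ---
# A minimal walk along a date-time sequence, assuming init() (defined later
# in this module) has been called to configure the global parsers.  The
# recurrence expression and context points are arbitrary example values.
def _example_sequence_walk():
    """Sketch: collect the first few points of a 6-hourly sequence."""
    sequence = ISO8601Sequence("PT6H", "20000101T0000Z", "20000102T0000Z")
    point = sequence.get_start_point()
    points = []
    while point is not None and len(points) < 3:
        points.append(str(point))
        point = sequence.get_next_point_on_sequence(point)
    return points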
def _get_old_anchor_step_recurrence(anchor, step, start_point):
"""Return a string representing an old-format recurrence translation."""
anchor_point = ISO8601Point.from_nonstandard_string(anchor)
# We may need to adjust the anchor downwards if it is ahead of the start.
if start_point is not None:
while anchor_point >= start_point + step:
anchor_point -= step
return str(anchor_point) + "/" + str(step)
def ingest_time(value, now=None):
"""Handle relative, truncated and prev/next cycle points.
Args:
value (str):
The string containing the prev()/next() stuff.
now (metomi.isodatetime.data.TimePoint):
A time point to use as the context for resolving the value.
"""
# remove extraneous whitespace from cycle point
value = value.replace(" ", "")
parser = SuiteSpecifics.point_parser
# integer point or old-style date-time cycle point format
is_integer = bool(re.match(r"\d+$", value))
# iso8601 expanded year
is_expanded = (
(value.startswith("-") or value.startswith("+"))
and "P" not in value
)
# prev() or next()
is_prev_next = "next" in value or "previous" in value
# offset from now (±P...)
is_offset = value.startswith("P") or value.startswith("-P")
if (
is_integer
or is_expanded
):
# we don't need to do any fancy processing
return value
# parse the timepoint if needed
if is_prev_next or is_offset:
# `value` isn't necessarily valid ISO8601
timepoint = None
is_truncated = None
else:
timepoint = parser.parse(value)
# missing date-time components off the front (e.g. 01T00)
is_truncated = timepoint.truncated
if not any((is_prev_next, is_offset, is_truncated)):
return value
if now is None:
now = parser.parse(get_current_time_string())
else:
now = parser.parse(now)
# correct for year in 'now' if year only,
# or year and time, specified in input
if re.search(r"\(-\d{2}[);T]", value):
now.year += 1
# correct for month in 'now' if year and month only,
# or year, month and time, specified in input
elif re.search(r"\(-\d{4}[);T]", value):
now.month_of_year += 1
# perform whatever transformation is required
offset = None
if is_prev_next:
cycle_point, offset = prev_next(value, now, parser)
elif is_offset:
cycle_point = now
offset = value
else: # is_truncated
cycle_point = now + timepoint
if offset is not None:
# add/subtract offset duration to/from chosen timepoint
duration_parser = SuiteSpecifics.interval_parser
offset = offset.replace('+', '')
offset = duration_parser.parse(offset)
cycle_point = cycle_point + offset
return str(cycle_point)
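# --- Illustrative examples (not part of the original module) ---
# Rough sketches of the value styles ingest_time() accepts, assuming init()
# has configured the parsers; the "now" strings are arbitrary examples.
#
#   ingest_time("PT1H", now="20000101T0000Z")       # offset from now
#   ingest_time("next(T00)", now="20000101T0600Z")  # next occurrence of T00
#   ingest_time("20000101T0000Z")                   # full point, unchanged
#
# Each call returns the resolved cycle point as a string.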
def prev_next(value, now, parser):
"""Handle prev() and next() syntax.
Args:
value (str):
The string containing the prev()/next() stuff.
now (metomi.isodatetime.data.TimePoint):
A time point to use as the context for resolving the value.
parser (metomi.isodatetime.parsers.TimePointParser):
A time point parser.
Returns
tuple - (cycle_point, offset)
"""
    # are we in gregorian mode (or some other eccentric calendar)?
if CALENDAR.mode != Calendar.MODE_GREGORIAN:
raise CylcConfigError(
'prev()/next() syntax must be used with integer or gregorian'
f' cycling modes ("{value}")'
)
# break down cycle point into constituent parts.
direction, tmp = value.split("(")
tmp, offset = tmp.split(")")
if offset.strip() == '':
offset = None
else:
offset = offset.strip()
timepoints = tmp.split(";")
# for use with 'previous' below.
go_back = {
"minute_of_hour": "PT1M",
"hour_of_day": "PT1H",
"day_of_week": "P1D",
"day_of_month": "P1D",
"day_of_year": "P1D",
"week_of_year": "P1W",
"month_of_year": "P1M",
"year_of_decade": "P1Y",
"decade_of_century": "P10Y",
"year_of_century": "P1Y",
"century": "P100Y"}
for i_time, my_time in enumerate(timepoints):
parsed_point = parser.parse(my_time.strip())
timepoints[i_time] = parsed_point + now
if direction == 'previous':
# for 'previous' determine next largest unit,
# from go_back dict (defined outside 'for' loop), and
# subtract 1 of it from each timepoint
duration_parser = SuiteSpecifics.interval_parser
next_unit = parsed_point.get_smallest_missing_property_name()
timepoints[i_time] = (
timepoints[i_time] -
duration_parser.parse(go_back[next_unit]))
my_diff = [abs(my_time - now) for my_time in timepoints]
cycle_point = timepoints[my_diff.index(min(my_diff))]
# ensure truncated dates do not have
    # time from 'now' included
if 'T' not in value.split(')')[0]:
cycle_point.hour_of_day = 0
cycle_point.minute_of_hour = 0
cycle_point.second_of_minute = 0
# ensure month and day from 'now' are not included
# where they did not appear in the truncated datetime
# NOTE: this may break when the order of tick over
# for time point is reversed!!!
# https://github.com/metomi/isodatetime/pull/101
# case 1 - year only
if re.search(r"\(-\d{2}[);T]", value):
cycle_point.month_of_year = 1
cycle_point.day_of_month = 1
# case 2 - month only or year and month
elif re.search(r"\(-(-\d{2}|\d{4})[;T)]", value):
cycle_point.day_of_month = 1
return cycle_point, offset
def init_from_cfg(cfg):
"""Initialise global variables (yuk) based on the configuration."""
num_expanded_year_digits = cfg['cylc'][
'cycle point num expanded year digits']
time_zone = cfg['cylc']['cycle point time zone']
custom_dump_format = cfg['cylc']['cycle point format']
assume_utc = cfg['cylc']['UTC mode']
cycling_mode = cfg['scheduling']['cycling mode']
init(
num_expanded_year_digits=num_expanded_year_digits,
custom_dump_format=custom_dump_format,
time_zone=time_zone,
assume_utc=assume_utc,
cycling_mode=cycling_mode
)
def init(num_expanded_year_digits=0, custom_dump_format=None, time_zone=None,
assume_utc=False, cycling_mode=None):
"""Initialise suite-setup-specific information."""
if cycling_mode in Calendar.default().MODES:
Calendar.default().set_mode(cycling_mode)
if time_zone is None:
if assume_utc:
time_zone = "Z"
time_zone_hours_minutes = (0, 0)
else:
time_zone = get_local_time_zone_format(TimeZoneFormatMode.reduced)
time_zone_hours_minutes = get_local_time_zone()
else:
time_zone_hours_minutes = TimePointDumper().get_time_zone(time_zone)
SuiteSpecifics.ASSUMED_TIME_ZONE = time_zone_hours_minutes
SuiteSpecifics.NUM_EXPANDED_YEAR_DIGITS = num_expanded_year_digits
if custom_dump_format is None:
if num_expanded_year_digits > 0:
SuiteSpecifics.DUMP_FORMAT = EXPANDED_DATE_TIME_FORMAT + time_zone
else:
SuiteSpecifics.DUMP_FORMAT = DATE_TIME_FORMAT + time_zone
else:
SuiteSpecifics.DUMP_FORMAT = custom_dump_format
if "+X" not in custom_dump_format and num_expanded_year_digits:
raise IllegalValueError(
'cycle point format',
('cylc', 'cycle point format'),
SuiteSpecifics.DUMP_FORMAT
)
SuiteSpecifics.iso8601_parsers = CylcTimeParser.initiate_parsers(
dump_format=SuiteSpecifics.DUMP_FORMAT,
num_expanded_year_digits=num_expanded_year_digits,
assumed_time_zone=SuiteSpecifics.ASSUMED_TIME_ZONE
)
(SuiteSpecifics.point_parser,
SuiteSpecifics.interval_parser,
SuiteSpecifics.recurrence_parser) = SuiteSpecifics.iso8601_parsers
SuiteSpecifics.abbrev_util = CylcTimeParser(
None, None, SuiteSpecifics.iso8601_parsers
)
def get_dump_format():
"""Return cycle point string dump format."""
return SuiteSpecifics.DUMP_FORMAT
def get_point_relative(offset_string, base_point):
"""Create a point from offset_string applied to base_point."""
try:
interval = ISO8601Interval(str(interval_parse(offset_string)))
except IsodatetimeError:
return ISO8601Point(str(
SuiteSpecifics.abbrev_util.parse_timepoint(
offset_string, context_point=_point_parse(base_point.value))
))
else:
return base_point + interval
def interval_parse(interval_string):
"""Parse an interval_string into a proper Duration class."""
try:
return _interval_parse(interval_string).copy()
except Exception:
try:
return -1 * _interval_parse(
interval_string.replace("-", "", 1)).copy()
except Exception:
return _interval_parse(
interval_string.replace("+", "", 1)).copy()
def is_offset_absolute(offset_string):
"""Return True if offset_string is a point rather than an interval."""
try:
interval_parse(offset_string)
except Exception:
return True
else:
return False
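# --- Illustrative examples (not part of the original module) ---
# interval_parse() accepts signed ISO 8601 durations such as "PT30M", "-P1D"
# or "+PT6H" and returns an isodatetime Duration.  is_offset_absolute()
# simply reports whether a string fails to parse as such a duration and is
# therefore treated as an absolute point, e.g.:
#
#   is_offset_absolute("PT6H")            # -> False (a duration offset)
#   is_offset_absolute("20000101T0000Z")  # -> True  (an absolute point)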
@lru_cache(10000)
def _interval_parse(interval_string):
"""Parse an interval_string into a proper Duration object."""
return SuiteSpecifics.interval_parser.parse(interval_string)
def point_parse(point_string):
"""Parse a point_string into a proper TimePoint object."""
return _point_parse(point_string).copy()
@lru_cache(10000)
def _point_parse(point_string):
"""Parse a point_string into a proper TimePoint object."""
if "%" in SuiteSpecifics.DUMP_FORMAT:
# May be a custom not-quite ISO 8601 dump format.
try:
return SuiteSpecifics.point_parser.strptime(
point_string, SuiteSpecifics.DUMP_FORMAT)
except IsodatetimeError:
pass
# Attempt to parse it in ISO 8601 format.
return SuiteSpecifics.point_parser.parse(point_string)
| gpl-3.0 | -7,118,441,874,249,039,000 | 35.970085 | 79 | 0.615102 | false |
kfix/SleepProxyServer | sleepproxy/sniff.py | 1 | 1143 | from select import select
from threading import Event, Thread
from scapy.config import conf
from scapy.data import ETH_P_ALL, MTU
class SnifferThread(Thread):
"""A thread which runs a scapy sniff, and can be stopped"""
def __init__(self, prn, filterexp, iface):
Thread.__init__(self) #make this a greenlet?
self._prn = prn
self._filterexp = filterexp
self._iface = iface
self._stop_recd = Event()
def run(self):
self._sniff()
def stop(self):
self._stop_recd.set()
def _sniff(self):
sock = conf.L2listen(type=ETH_P_ALL, filter=self._filterexp, iface=self._iface)
while 1:
try:
sel = select([sock], [], [], 1)
if sock in sel[0]:
p = sock.recv(MTU)
if p is None:
break
self._prn(p)
if self._stop_recd.is_set():
print "Breaking out of sniffer thread %s" % (self, )
break
except KeyboardInterrupt:
break
sock.close()
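# --- Illustrative usage sketch (not part of the original module) ---
# SnifferThread is normally driven by the surrounding sleepproxy code; the
# outline below only shows the intended start/stop life cycle.  The filter
# expression, interface name and callback are arbitrary example values.
#
#   def handle(packet):
#       print packet.summary()
#
#   sniffer = SnifferThread(prn=handle, filterexp='arp', iface='eth0')
#   sniffer.start()
#   # ... later, from another thread ...
#   sniffer.stop()   # sets the event; _sniff() notices it within a second
#   sniffer.join()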
| bsd-2-clause | 825,196,674,331,822,000 | 28.307692 | 87 | 0.504812 | false |
midokura/python-midonetclient | src/midonetclient/application.py | 1 | 18841 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tomoe Sugihara <[email protected]>, Midokura
# @author: Ryu Ishimoto <[email protected]>, Midokura
# @author: Artem Dmytrenko <[email protected]>, Midokura
import os
from midonetclient import vendor_media_type
from midonetclient.ad_route import AdRoute
from midonetclient.bgp import Bgp
from midonetclient.bridge import Bridge
from midonetclient.chain import Chain
from midonetclient.host import Host
from midonetclient.port import Port
from midonetclient.port_group import PortGroup
from midonetclient.ip_addr_group import IpAddrGroup
from midonetclient.resource_base import ResourceBase
from midonetclient.route import Route
from midonetclient.router import Router
from midonetclient.rule import Rule
from midonetclient.tenant import Tenant
from midonetclient.tunnel_zone import TunnelZone
from midonetclient.write_version import WriteVersion
from midonetclient.system_state import SystemState
from midonetclient.host_version import HostVersion
from midonetclient.load_balancer import LoadBalancer
from midonetclient.vip import VIP
from midonetclient.pool import Pool
from midonetclient.pool_member import PoolMember
from midonetclient.health_monitor import HealthMonitor
from midonetclient.pool_statistic import PoolStatistic
from midonetclient.vtep import Vtep
class Application(ResourceBase):
media_type = vendor_media_type.APPLICATION_JSON_V5
ID_TOKEN = '{id}'
IP_ADDR_TOKEN = '{ipAddr}'
def __init__(self, uri, dto, auth):
super(Application, self).__init__(uri, dto, auth)
def get_ad_route_template(self):
return self.dto['adRouteTemplate']
def get_bgp_template(self):
return self.dto['bgpTemplate']
def get_bridge_template(self):
return self.dto['bridgeTemplate']
def get_chain_template(self):
return self.dto['chainTemplate']
def get_host_template(self):
return self.dto['hostTemplate']
def get_port_group_template(self):
return self.dto['portGroupTemplate']
def get_ip_addr_group_template(self):
return self.dto['ipAddrGroupTemplate']
def get_port_template(self):
return self.dto['portTemplate']
def get_route_template(self):
return self.dto['routeTemplate']
def get_router_template(self):
return self.dto['routerTemplate']
def get_rule_template(self):
return self.dto['ruleTemplate']
def get_tenant_template(self):
return self.dto['tenantTemplate']
def get_tunnel_zone_template(self):
return self.dto['tunnelZoneTemplate']
def get_vtep_template(self):
return self.dto['vtepTemplate']
def get_write_version_uri(self):
return self.dto['writeVersion']
def get_system_state_uri(self):
return self.dto['systemState']
def get_host_versions_uri(self):
return self.dto['hostVersions']
#L4LB resources
def get_load_balancers_uri(self):
return self.dto['loadBalancers']
def get_vips_uri(self):
return self.dto['vips']
def get_pools_uri(self):
return self.dto['pools']
def get_pool_members_uri(self):
return self.dto['poolMembers']
def get_ports_uri(self):
return self.dto['ports']
def get_health_monitors_uri(self):
return self.dto['healthMonitors']
def get_pool_statistics_uri(self):
return self.dto['poolStatistics']
def get_load_balancer_template(self):
return self.dto['loadBalancerTemplate']
def get_vip_template(self):
return self.dto['vipTemplate']
def get_pool_template(self):
return self.dto['poolTemplate']
def get_pool_member_template(self):
return self.dto['poolMemberTemplate']
def get_health_monitor_template(self):
return self.dto['healthMonitorTemplate']
def get_pool_statistic_template(self):
return self.dto['poolStatisticTemplate']
def get_tenants(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_TENANT_COLLECTION_JSON}
return self.get_children(self.dto['tenants'], query, headers, Tenant)
def get_routers(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_ROUTER_COLLECTION_JSON}
return self.get_children(self.dto['routers'], query, headers, Router)
def get_bridges(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_BRIDGE_COLLECTION_JSON}
return self.get_children(self.dto['bridges'], query, headers, Bridge)
def get_ports(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_PORT_COLLECTION_JSON}
return self.get_children(self.dto['ports'], query, headers, Port)
def get_port_groups(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
PortGroup)
def get_ip_addr_groups(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_IP_ADDR_GROUP_COLLECTION_JSON}
return self.get_children(self.dto['ipAddrGroups'], query, headers,
IpAddrGroup)
def get_chains(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_CHAIN_COLLECTION_JSON}
return self.get_children(self.dto['chains'], query, headers, Chain)
def get_tunnel_zones(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_TUNNEL_ZONE_COLLECTION_JSON}
return self.get_children(self.dto['tunnelZones'], query, headers,
TunnelZone)
def get_tunnel_zone(self, id_):
return self._get_resource_by_id(TunnelZone, self.dto['tunnelZones'],
self.get_tunnel_zone_template(), id_)
def get_hosts(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HOST_COLLECTION_JSON}
return self.get_children(self.dto['hosts'], query, headers, Host)
def delete_ad_route(self, id_):
return self._delete_resource_by_id(self.get_ad_route_template(), id_)
def get_ad_route(self, id_):
return self._get_resource_by_id(AdRoute, self.dto['adRoutes'],
self.get_ad_route_template(), id_)
def delete_bgp(self, id_):
return self._delete_resource_by_id(self.get_bgp_template(), id_)
def get_bgp(self, id_):
return self._get_resource_by_id(Bgp, None, self.get_bgp_template(),
id_)
def delete_bridge(self, id_):
return self._delete_resource_by_id(self.get_bridge_template(), id_)
def get_bridge(self, id_):
return self._get_resource_by_id(Bridge, self.dto['bridges'],
self.get_bridge_template(), id_)
def delete_chain(self, id_):
return self._delete_resource_by_id(self.get_chain_template(), id_)
def get_chain(self, id_):
return self._get_resource_by_id(Chain, self.dto['chains'],
self.get_chain_template(), id_)
def get_host(self, id_):
return self._get_resource_by_id(Host, self.dto['hosts'],
self.get_host_template(), id_)
def delete_port_group(self, id_):
return self._delete_resource_by_id(self.get_port_group_template(), id_)
def get_port_group(self, id_):
return self._get_resource_by_id(PortGroup, self.dto['portGroups'],
self.get_port_group_template(), id_)
def delete_ip_addr_group(self, id_):
return self._delete_resource_by_id(self.get_ip_addr_group_template(),
id_)
def get_ip_addr_group(self, id_):
return self._get_resource_by_id(IpAddrGroup, self.dto['ipAddrGroups'],
self.get_ip_addr_group_template(), id_)
def delete_port(self, id_):
return self._delete_resource_by_id(self.get_port_template(), id_)
def get_port(self, id_):
return self._get_resource_by_id(Port, None,
self.get_port_template(), id_)
def delete_route(self, id_):
return self._delete_resource_by_id(self.get_route_template(), id_)
def get_route(self, id_):
return self._get_resource_by_id(Route, None, self.get_route_template(),
id_)
def delete_router(self, id_):
return self._delete_resource_by_id(self.get_router_template(), id_)
def get_router(self, id_):
return self._get_resource_by_id(Router, self.dto['routers'],
self.get_router_template(), id_)
def delete_rule(self, id_):
return self._delete_resource_by_id(self.get_rule_template(), id_)
def get_rule(self, id_):
return self._get_resource_by_id(Rule, None, self.get_rule_template(),
id_)
def get_tenant(self, id_):
return self._get_resource_by_id(Tenant, self.dto['tenants'],
self.get_tenant_template(), id_)
def add_router(self):
return Router(self.dto['routers'], {}, self.auth)
def add_bridge(self):
return Bridge(self.dto['bridges'], {}, self.auth)
def add_port_group(self):
return PortGroup(self.dto['portGroups'], {}, self.auth)
def add_ip_addr_group(self):
return IpAddrGroup(self.dto['ipAddrGroups'], {}, self.auth)
def add_chain(self):
return Chain(self.dto['chains'], {}, self.auth)
def add_tunnel_zone(self):
return TunnelZone(self.dto['tunnelZones'], {}, self.auth)
def add_gre_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'gre'}, self.auth,
vendor_media_type.APPLICATION_GRE_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_GRE_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def add_vxlan_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'vxlan'}, self.auth,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def add_vtep_tunnel_zone(self):
return TunnelZone(
self.dto['tunnelZones'], {'type': 'vtep'}, self.auth,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_JSON,
vendor_media_type.APPLICATION_TUNNEL_ZONE_HOST_COLLECTION_JSON)
def get_write_version(self):
return self._get_resource(WriteVersion, None,
self.get_write_version_uri())
def get_system_state(self):
return self._get_resource(SystemState, None,
self.get_system_state_uri())
def get_host_versions(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HOST_VERSION_JSON}
return self.get_children(self.dto['hostVersions'],
query, headers, HostVersion)
def _create_uri_from_template(self, template, token, value):
return template.replace(token, value)
def _get_resource(self, clazz, create_uri, uri):
return clazz(create_uri, {'uri': uri}, self.auth).get(
headers={'Content-Type': clazz.media_type,
'Accept': clazz.media_type})
def _get_resource_by_id(self, clazz, create_uri,
template, id_):
uri = self._create_uri_from_template(template,
self.ID_TOKEN,
id_)
return self._get_resource(clazz, create_uri, uri)
def _get_resource_by_ip_addr(self, clazz, create_uri,
template, ip_address):
uri = self._create_uri_from_template(template,
self.IP_ADDR_TOKEN,
ip_address)
return self._get_resource(clazz, create_uri, uri)
def _delete_resource_by_id(self, template, id_):
uri = self._create_uri_from_template(template,
self.ID_TOKEN,
id_)
self.auth.do_request(uri, 'DELETE')
def _delete_resource_by_ip_addr(self, template, ip_address):
uri = self._create_uri_from_template(template,
self.IP_ADDR_TOKEN,
ip_address)
self.auth.do_request(uri, 'DELETE')
def _upload_resource(self, clazz, create_uri, uri, body, headers):
return clazz(create_uri, {'uri': uri}, self.auth)\
.upload(create_uri, body, headers=headers)
#L4LB resources
def get_load_balancers(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_LOAD_BALANCER_COLLECTION_JSON}
return self.get_children(self.dto['loadBalancers'],
query, headers, LoadBalancer)
def get_vips(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_VIP_COLLECTION_JSON}
return self.get_children(self.dto['vips'], query, headers, VIP)
def get_pools(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_COLLECTION_JSON}
return self.get_children(self.dto['pools'], query, headers, Pool)
def get_pool_members(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_MEMBER_COLLECTION_JSON}
return self.get_children(self.dto['poolMembers'],
query, headers, PoolMember)
def get_health_monitors(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_HEALTH_MONITOR_COLLECTION_JSON}
return self.get_children(self.dto['healthMonitors'],
query, headers, HealthMonitor)
def get_pool_statistics(self, query):
headers = {'Accept':
vendor_media_type.APPLICATION_POOL_STATISTIC_COLLECTION_JSON}
return self.get_children(self.dto['poolStatistics'],
query, headers, PoolStatistic)
def get_load_balancer(self, id_):
return self._get_resource_by_id(LoadBalancer,
self.dto['loadBalancers'],
self.get_load_balancer_template(),
id_)
def get_vip(self, id_):
return self._get_resource_by_id(VIP,
self.dto['vips'],
self.get_vip_template(),
id_)
def get_pool(self, id_):
return self._get_resource_by_id(Pool,
self.dto['pools'],
self.get_pool_template(),
id_)
def get_pool_member(self, id_):
return self._get_resource_by_id(PoolMember,
self.dto['poolMembers'],
self.get_pool_member_template(),
id_)
def get_health_monitor(self, id_):
return self._get_resource_by_id(HealthMonitor,
self.dto['healthMonitors'],
self.get_health_monitor_template(),
id_)
def get_pool_statistic(self, id_):
return self._get_resource_by_id(PoolStatistic,
                                        self.dto['poolStatistics'],
self.get_pool_statistic_template(),
id_)
def delete_load_balancer(self, id_):
return self._delete_resource_by_id(
self.get_load_balancer_template(), id_)
def delete_vip(self, id_):
return self._delete_resource_by_id(self.get_vip_template(), id_)
def delete_pool(self, id_):
return self._delete_resource_by_id(self.get_pool_template(), id_)
def delete_pool_member(self, id_):
return self._delete_resource_by_id(
self.get_pool_member_template(), id_)
def delete_health_monitor(self, id_):
return self._delete_resource_by_id(
self.get_health_monitor_template(), id_)
def delete_pool_statistic(self, id_):
return self._delete_resource_by_id(
self.get_pool_statistic_template(), id_)
def add_load_balancer(self):
return LoadBalancer(self.dto['loadBalancers'], {}, self.auth)
def add_vip(self):
return VIP(self.dto['vips'], {}, self.auth)
def add_pool(self):
return Pool(self.dto['pools'], {}, self.auth)
def add_pool_member(self):
return PoolMember(self.dto['poolMembers'], {}, self.auth)
def add_health_monitor(self):
return HealthMonitor(self.dto['healthMonitors'], {}, self.auth)
def add_pool_statistic(self):
return PoolStatistic(self.dto['poolStatistics'], {}, self.auth)
def get_vteps(self):
headers = {'Accept':
vendor_media_type.APPLICATION_VTEP_COLLECTION_JSON}
return self.get_children(self.dto['vteps'], {}, headers, Vtep)
def add_vtep(self):
return Vtep(self.dto['vteps'], {}, self.auth)
def get_vtep(self, mgmt_ip):
return self._get_resource_by_ip_addr(Vtep,
self.dto['vteps'],
self.get_vtep_template(),
mgmt_ip)
def delete_vtep(self, mgmt_ip):
return self._delete_resource_by_ip_addr(self.get_vtep_template(),
mgmt_ip)
| apache-2.0 | -5,466,880,693,771,139,000 | 37.217039 | 80 | 0.581869 | false |
sheppard/django-rest-framework | tests/test_generics.py | 1 | 19410 | from __future__ import unicode_literals
import pytest
from django.db import models
from django.shortcuts import get_object_or_404
from django.test import TestCase
from django.utils import six
from rest_framework import generics, renderers, serializers, status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from tests.models import (
BasicModel, ForeignKeySource, ForeignKeyTarget, RESTFrameworkModel
)
factory = APIRequestFactory()
# Models
class SlugBasedModel(RESTFrameworkModel):
text = models.CharField(max_length=100)
slug = models.SlugField(max_length=32)
# Model for regression test for #285
class Comment(RESTFrameworkModel):
email = models.EmailField()
content = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
# Serializers
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class ForeignKeySerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.ReadOnlyField()
class Meta:
model = SlugBasedModel
fields = ('text', 'slug')
# Views
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.exclude(text='filtered out')
serializer_class = BasicSerializer
class FKInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = ForeignKeySource.objects.all()
serializer_class = ForeignKeySerializer
class SlugBasedInstanceView(InstanceView):
"""
A model with a slug-field.
"""
queryset = SlugBasedModel.objects.all()
serializer_class = SlugSerializer
lookup_field = 'slug'
# Tests
class TestRootView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data)
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "PUT" not allowed.'})
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "DELETE" not allowed.'})
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {'id': 4, 'text': 'foobar'})
created = self.objects.get(id=4)
self.assertEqual(created.text, 'foobar')
def test_post_error_root_view(self):
"""
POST requests to ListCreateAPIView in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.post('/', data, HTTP_ACCEPT='text/html')
response = self.view(request).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
EXPECTED_QUERIES_FOR_PUT = 2
class TestInstanceView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "POST" not allowed.'})
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk='1').render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(dict(response.data), {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.patch('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, six.b(''))
ids = [obj.id for obj in self.objects.all()]
self.assertEqual(ids, [2, 3])
def test_get_instance_view_incorrect_arg(self):
"""
GET requests with an incorrect pk type, should raise 404, not 500.
Regression test for #890.
"""
request = factory.get('/a')
with self.assertNumQueries(0):
response = self.view(request, pk='a').render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_cannot_set_id(self):
"""
PUT requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foobar'})
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should return 404 if
an object does not currently exist.
"""
self.objects.get(id=1).delete()
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_put_to_filtered_out_instance(self):
"""
PUT requests to an URL of instance which is filtered out should not be
able to create new objects.
"""
data = {'text': 'foo'}
filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
request = factory.put('/{0}'.format(filtered_out_pk), data, format='json')
response = self.view(request, pk=filtered_out_pk).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_patch_cannot_create_an_object(self):
"""
PATCH requests should not be able to create objects.
"""
data = {'text': 'foobar'}
request = factory.patch('/999', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=999).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertFalse(self.objects.filter(id=999).exists())
def test_put_error_instance_view(self):
"""
Incorrect PUT requests in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.put('/', data, HTTP_ACCEPT='text/html')
response = self.view(request, pk=1).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
self.assertIn(expected_error, response.rendered_content.decode('utf-8'))
class TestFKInstanceView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
t = ForeignKeyTarget(name=item)
t.save()
ForeignKeySource(name='source_' + item, target=t).save()
self.objects = ForeignKeySource.objects
self.data = [
{'id': obj.id, 'name': obj.name}
for obj in self.objects.all()
]
self.view = FKInstanceView.as_view()
class TestOverriddenGetObject(TestCase):
"""
Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the
queryset/model mechanism but instead overrides get_object()
"""
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView):
"""
Example detail view for override of get_object().
"""
serializer_class = BasicSerializer
def get_object(self):
pk = int(self.kwargs['pk'])
return get_object_or_404(BasicModel.objects.all(), id=pk)
self.view = OverriddenGetObjectView.as_view()
def test_overridden_get_object_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self.data[0])
# Regression test for #285
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
class CommentView(generics.ListCreateAPIView):
serializer_class = CommentSerializer
model = Comment
class TestCreateModelWithAutoNowAddField(TestCase):
def setUp(self):
self.objects = Comment.objects
self.view = CommentView.as_view()
def test_create_model_with_auto_now_add_field(self):
"""
Regression test for #285
https://github.com/tomchristie/django-rest-framework/issues/285
"""
data = {'email': '[email protected]', 'content': 'foobar'}
request = factory.post('/', data, format='json')
response = self.view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created = self.objects.get(id=1)
self.assertEqual(created.content, 'foobar')
# Test for particularly ugly regression with m2m in browsable API
class ClassB(models.Model):
name = models.CharField(max_length=255)
class ClassA(models.Model):
name = models.CharField(max_length=255)
children = models.ManyToManyField(ClassB, blank=True, null=True)
class ClassASerializer(serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True, queryset=ClassB.objects.all()
)
class Meta:
model = ClassA
class ExampleView(generics.ListCreateAPIView):
serializer_class = ClassASerializer
queryset = ClassA.objects.all()
class TestM2MBrowsableAPI(TestCase):
def test_m2m_in_browsable_api(self):
"""
Test for particularly ugly regression with m2m in browsable API
"""
request = factory.get('/', HTTP_ACCEPT='text/html')
view = ExampleView().as_view()
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
class InclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='foo')
class ExclusiveFilterBackend(object):
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='other')
class TwoFieldModel(models.Model):
field_a = models.CharField(max_length=100)
field_b = models.CharField(max_length=100)
class DynamicSerializerView(generics.ListCreateAPIView):
queryset = TwoFieldModel.objects.all()
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_serializer_class(self):
if self.request.method == 'POST':
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = ('field_b',)
else:
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
return DynamicSerializer
class TestFilterBackendAppliedToViews(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances to filter on.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
def test_get_root_view_filters_by_name_with_filter_backend(self):
"""
GET requests to ListCreateAPIView should return filtered list.
"""
root_view = RootView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data, [{'id': 1, 'text': 'foo'}])
def test_get_root_view_filters_out_all_models_with_exclusive_filter_backend(self):
"""
GET requests to ListCreateAPIView should return empty list when all models are filtered out.
"""
root_view = RootView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/')
response = root_view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_get_instance_view_filters_out_name_with_filter_backend(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.
"""
instance_view = InstanceView.as_view(filter_backends=(ExclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_get_instance_view_will_return_single_object_when_filter_does_not_exclude_it(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object when not excluded
"""
instance_view = InstanceView.as_view(filter_backends=(InclusiveFilterBackend,))
request = factory.get('/1')
response = instance_view(request, pk=1).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'id': 1, 'text': 'foo'})
def test_dynamic_serializer_form_in_browsable_api(self):
"""
        The browsable API form should use the serializer returned by get_serializer_class().
"""
view = DynamicSerializerView.as_view()
request = factory.get('/')
response = view(request).render()
self.assertContains(response, 'field_b')
self.assertNotContains(response, 'field_a')
class TestGuardedQueryset(TestCase):
def test_guarded_queryset(self):
class QuerysetAccessError(generics.ListAPIView):
queryset = BasicModel.objects.all()
def get(self, request):
return Response(list(self.queryset))
view = QuerysetAccessError.as_view()
request = factory.get('/')
with pytest.raises(RuntimeError):
view(request).render()
| bsd-2-clause | 8,100,674,122,528,714,000 | 34.680147 | 109 | 0.631685 | false |
wwitzel3/awx | awx/main/tests/unit/api/serializers/conftest.py | 1 | 1711 | import mock
import pytest
@pytest.fixture
def get_related_assert():
def fn(model_obj, related, resource_name, related_resource_name):
assert related_resource_name in related
assert related[related_resource_name] == '/api/v2/%s/%d/%s/' % (resource_name, model_obj.pk, related_resource_name)
return fn
@pytest.fixture
def get_related_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
related = serializer.get_related(model_obj)
return related
return fn
@pytest.fixture
def test_get_related(get_related_assert, get_related_mock_and_run):
def fn(serializer_class, model_obj, resource_name, related_resource_name):
related = get_related_mock_and_run(serializer_class, model_obj)
get_related_assert(model_obj, related, resource_name, related_resource_name)
return related
return fn
@pytest.fixture
def get_summary_fields_assert():
def fn(summary, summary_field_name):
assert summary_field_name in summary
return fn
@pytest.fixture
def get_summary_fields_mock_and_run():
def fn(serializer_class, model_obj):
serializer = serializer_class()
serializer.show_capabilities = []
serializer.context['view'] = mock.Mock(kwargs={})
return serializer.get_summary_fields(model_obj)
return fn
@pytest.fixture
def test_get_summary_fields(get_summary_fields_mock_and_run, get_summary_fields_assert):
def fn(serializer_class, model_obj, summary_field_name):
summary = get_summary_fields_mock_and_run(serializer_class, model_obj)
get_summary_fields_assert(summary, summary_field_name)
return summary
return fn
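# Example usage from a test module (sketch only; the serializer, fixture and
# related-resource names below are made-up placeholders, not real awx objects):
#
#   def test_widget_related(test_get_related, widget):
#       test_get_related(WidgetSerializer, widget, 'widgets', 'activity_stream')
#
#   def test_widget_summary_fields(test_get_summary_fields, widget):
#       test_get_summary_fields(WidgetSerializer, widget, 'created_by')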
| apache-2.0 | 6,837,339,957,018,921,000 | 30.685185 | 123 | 0.6955 | false |
hhatto/pgmagick | test/test_cookbook.py | 1 | 3362 | # coding: utf-8
import os.path
import sys
import unittest
from pgmagick import api
from pgmagick import Image, Geometry, Blob
from utils import MACOSX_FONT
class TestCookbook(unittest.TestCase):
def setUp(self):
self.tmp_filename_jpg = "_cookbook_test.jpg"
self.tmp_filename_png = "_cookbook_test.png"
def tearDown(self):
if os.path.exists(self.tmp_filename_jpg):
os.remove(self.tmp_filename_jpg)
if os.path.exists(self.tmp_filename_png):
os.remove(self.tmp_filename_png)
def test_red_background_jpeg_image(self):
img = api.Image((300, 200), 'red')
img.write(self.tmp_filename_jpg)
def test_transparent_png_image(self):
img = api.Image((300, 200), 'transparent')
img.write(self.tmp_filename_png)
def test_gradient_png_image(self):
img = api.Image((300, 200), 'gradient:#ffffff-#000000')
img.write(self.tmp_filename_png)
def test_annotate(self):
img = api.Image((300, 200))
if sys.platform.lower() == 'darwin':
img.font(MACOSX_FONT)
img.annotate('Hello World')
img.write(self.tmp_filename_png)
def test_annotate_with_angle45(self):
img = api.Image((300, 200))
if sys.platform.lower() == 'darwin':
img.font(MACOSX_FONT)
img.annotate('Hello World', angle=45)
img.write(self.tmp_filename_png)
def test_annotate_with_japanese_font(self):
img = api.Image((300, 200))
if sys.platform.lower() == 'darwin':
img.font("/System/Library/Fonts/Hiragino Sans GB.ttc")
else:
# TODO: not support windows
img.font("/usr/share/fonts/truetype/ttf-japanese-gothic.ttf")
        img.annotate('ようこそpgmagickへ!!')  # "Welcome to pgmagick!!" in Japanese
img.write(self.tmp_filename_png)
def test_scale(self):
img = api.Image((300, 200), 'blue')
img.write(self.tmp_filename_png)
img2 = api.Image(self.tmp_filename_png)
img2.scale(0.5)
img2.write(self.tmp_filename_png)
self.assertEqual(img2.width, 150)
self.assertEqual(img2.height, 100)
def test_scale_with_lanczos(self):
img = api.Image((300, 200), 'blue')
img.write(self.tmp_filename_png)
img2 = api.Image(self.tmp_filename_png)
img2.scale((150, 100), 'lanczos')
img2.write(self.tmp_filename_png)
self.assertEqual(img2.width, 150)
self.assertEqual(img2.height, 100)
@unittest.skipIf(sys.version_info[0] == 3, "not support python3.x")
def test_scale_jpeg(self):
img = api.Image((400, 400), 'blue')
img.write(self.tmp_filename_jpg)
with open(self.tmp_filename_jpg, 'rb') as fp:
b = Blob(str(fp.read()))
img2 = Image(b, Geometry(200, 200))
if sys.platform.lower() == 'darwin':
# NOTE: error occur when use '200x200' param
# -----------------------------------------------------
# RuntimeError: Magick: Application transferred too few
# scanlines (x.jpg) reported by coders/jpeg.c:344 (JPEGErrorHandler)
img2.scale('199x199')
else:
img2.scale('200x200')
img2.write(self.tmp_filename_jpg)
if __name__ == '__main__':
unittest.main()
| mit | 3,823,916,215,492,745,700 | 33.916667 | 90 | 0.581146 | false |
BV-DR/foamBazar | ideFoam/inputFiles/transportProperties.py | 1 | 1464 | from ideFoam.inputFiles import ReadWriteFile, getFilePath
from PyFoam.Basics.DataStructures import DictProxy
from os.path import join
from ideFoam.inputFiles.compatOF import water, air
"""
Convenience class to simply write "TransportProperties"
"""
class TransportProperties(ReadWriteFile) :
"""
    TransportProperties dictionary
"""
@classmethod
def Build(cls , case, rhoWater = 1000 , nuWater = 1e-6, rhoAir = 1. , nuAir = 1.48e-05, sigma = 0.0 , application = "foamStar") :
res = cls( name = join(case, getFilePath("transportProperties") ), read = False )
res.header["class"] = "dictionary"
if application == "foamStar" : res["phases"] = ["water" , "air"]
dw = DictProxy()
dw["transportModel"] = "Newtonian"
dw["nu"] = "nu [0 2 -1 0 0 0 0] {}".format(nuWater)
dw["rho"] = "rho [1 -3 0 0 0 0 0] {}".format(rhoWater)
res['"'+water[application]+'"'] = dw
da = DictProxy()
da["transportModel"] = "Newtonian",
da["nu"] = "nu [0 2 -1 0 0 0 0] {}".format(nuAir)
da["rho"] = "rho [1 -3 0 0 0 0 0] {}".format(rhoAir)
res['"'+air[application]+'"'] = da
res[r"sigma"] = "sigma [1 0 -2 0 0 0 0] {}".format(sigma)
return res
if __name__ == "__main__" :
print(TransportProperties.Build("test" , application = "foamExtend"))
| gpl-3.0 | 7,376,739,367,757,549,000 | 32.272727 | 134 | 0.551913 | false |
caspartse/QQ-Groups-Spider | vendor/pyexcel/plugins/renderers/sqlalchemy.py | 1 | 2226 | """
pyexcel.plugin.renderers.sqlalchemy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Export data into database tables
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel_io import save_data
import pyexcel_io.database.common as sql
from pyexcel._compact import OrderedDict
from pyexcel.renderer import DbRenderer
import pyexcel.internal.common as common
class SQLAlchemyRenderer(DbRenderer):
"""Import data into database"""
def render_sheet_to_stream(self, file_stream, sheet,
init=None, mapdict=None, **keywords):
headers = common.get_sheet_headers(sheet)
importer = sql.SQLTableImporter(file_stream[0])
adapter = sql.SQLTableImportAdapter(file_stream[1])
adapter.column_names = headers
adapter.row_initializer = init
adapter.column_name_mapping_dict = mapdict
importer.append(adapter)
save_data(importer, {adapter.get_name(): sheet.get_internal_array()},
file_type=self._file_type, **keywords)
def render_book_to_stream(self, file_stream, book,
inits=None, mapdicts=None, **keywords):
session, tables = file_stream
thebook = book
initializers = inits
colnames_array = common.get_book_headers_in_array(book)
if initializers is None:
initializers = [None] * len(tables)
if mapdicts is None:
mapdicts = [None] * len(tables)
scattered = zip(tables, colnames_array, mapdicts, initializers)
importer = sql.SQLTableImporter(session)
for each_table in scattered:
adapter = sql.SQLTableImportAdapter(each_table[0])
adapter.column_names = each_table[1]
adapter.column_name_mapping_dict = each_table[2]
adapter.row_initializer = each_table[3]
importer.append(adapter)
to_store = OrderedDict()
for sheet in thebook:
            # use get_internal_array() because book.to_dict() brings in
            # column_names, which would corrupt the data
to_store[sheet.name] = sheet.get_internal_array()
save_data(importer, to_store, file_type=self._file_type, **keywords)
| mit | -2,864,051,646,709,129,700 | 38.75 | 77 | 0.624888 | false |
gkc1000/pyscf | pyscf/nao/m_gpaw_wfsx.py | 1 | 1781 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os
import sys
import numpy as np
from numpy import zeros, empty
import warnings
class gpaw_wfsx_c():
def __init__(self, calc):
"""
Gathers the information on the available wavefunctions
(Kohn-Sham or Hartree-Fock orbitals)
"""
assert calc.wfs.mode.lower()=='lcao'
        self.nreim = 1  # store only the real part, although GPAW LCAO coefficients may be complex
self.nspin = calc.get_number_of_spins()
self.norbs = calc.setups.nao
self.nbands= calc.parameters['nbands']
self.k2xyz = calc.parameters['kpts']
self.nkpoints = len(self.k2xyz)
self.ksn2e = np.zeros((self.nkpoints, self.nspin, self.nbands))
for ik in range(self.nkpoints):
for spin in range(self.nspin):
self.ksn2e[ik, spin, :] = calc.wfs.collect_eigenvalues(spin,ik)
# Import wavefunctions from GPAW calculator
self.x = np.zeros((self.nkpoints, self.nspin, self.nbands, self.norbs, self.nreim))
for k in range(calc.wfs.kd.nibzkpts):
for s in range(calc.wfs.nspins):
C_nM = calc.wfs.collect_array('C_nM', k, s)
self.x[k, s, :, :, 0] = C_nM.real
| apache-2.0 | -6,172,492,844,675,328,000 | 36.104167 | 87 | 0.691746 | false |
jrversteegh/softsailor | deps/scipy-0.10.0b2/scipy/misc/__init__.py | 2 | 1914 | """
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
Various utilities that don't have another home.
Note that the Python Imaging Library (PIL) is not a dependency
of SciPy and therefore the `pilutil` module is not available on
systems that don't have PIL installed.
.. autosummary::
:toctree: generated/
bytescale - Byte scales an array (image)
central_diff_weights - Weights for an n-point central m-th derivative
comb - Combinations of N things taken k at a time, "N choose k"
    derivative - Find the n-th derivative of a function at a point
factorial - The factorial function, n! = special.gamma(n+1)
factorial2 - Double factorial, (n!)!
factorialk - (...((n!)!)!...)! where there are k '!'
fromimage - Return a copy of a PIL image as a numpy array
imfilter - Simple filtering of an image
imread - Read an image file from a filename
imresize - Resize an image
imrotate - Rotate an image counter-clockwise
imsave - Save an array to an image file
imshow - Simple showing of an image through an external viewer
info - Get help information for a function, class, or module
lena - Get classic image processing example image Lena
pade - Pade approximation to function as the ratio of two polynomials
    radon - Return the Radon transform of an image
toimage - Takes a numpy array and returns a PIL image
"""
__all__ = ['who', 'source', 'info', 'doccer']
import doccer
from common import *
from numpy import who, source, info as _info
import sys
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'):
return _info(object, maxwidth, output, toplevel)
info.__doc__ = _info.__doc__
del sys
try:
from pilutil import *
__all__ += pilutil.__all__
except ImportError:
pass
__all__ += common.__all__
from numpy.testing import Tester
test = Tester().test
| gpl-3.0 | 5,945,414,943,550,416,000 | 30.9 | 72 | 0.673981 | false |
naresh21/synergetics-edx-platform | lms/djangoapps/edcast/api.py | 1 | 3170 | import logging
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from student.models import User
from enrollment import api
from course_modes.models import CourseMode
from openedx.core.lib.exceptions import CourseNotFoundError
from enrollment.errors import (
CourseEnrollmentError,
CourseModeNotFoundError,
CourseEnrollmentExistsError
)
from .authentication import JSONWebTokenAuthenticationQS
log = logging.getLogger("Edcast")
class EdcastUserEnroll(APIView):
authentication_classes = [JSONWebTokenAuthenticationQS]
def post(self, request):
data = request.json or request.data
user_details = data.get("user")
course_details = data.get("payment")
course_id = course_details.get("course_id")
email = user_details.get("email")
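        # Expected request body (sketch, inferred from the fields read here;
        # the values are illustrative placeholders only):
        #   {
        #     "user": {"email": "[email protected]"},
        #     "payment": {"course_id": "course-v1:OrgX+CS101+2017"}
        #   }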
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
log.exception("Invalid user trying to enroll course")
error = {"error_message": "Invalid user"}
return Response(error, status=status.HTTP_401_UNAUTHORIZED)
try:
username = user.username
enrollment = api.get_enrollment(username, unicode(course_id))
if not enrollment:
api.add_enrollment(username, unicode(course_id), mode=CourseMode.HONOR, is_active=True)
return Response({})
except CourseNotFoundError:
log.exception("Invalid course id.")
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": u"No course '{course_id}' found for enrollment".format(course_id=course_id)
})
except CourseModeNotFoundError:
log.exception("Course mode not define for the {course_id}".format(course_id=course_id))
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"The [{mode}] course mode is expired or otherwise unavailable for course run [{course_id}]."
).format(mode=CourseMode.HONOR, course_id=course_id)
})
except CourseEnrollmentExistsError as error:
log.warning('An enrollment already exists for user [%s] in course run [%s].', username, course_id)
return Response(data={
"error": "User already enrolled in the {course_id}".format(course_id=course_id)
})
except CourseEnrollmentError:
log.exception("An error occurred while creating the new course enrollment for user "
"[%s] in course run [%s]", username, course_id)
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while creating the new course enrollment for user "
u"'{username}' in course '{course_id}'"
).format(username=username, course_id=course_id)
}
)
| agpl-3.0 | 7,598,553,352,521,704,000 | 37.658537 | 117 | 0.603785 | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/basics/quantity.py | 1 | 3116 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.basics.quantity Contains the Quantity class, representing floating point values with a certain
# uncertainty
# -----------------------------------------------------------------
# Import standard modules
import math
# -----------------------------------------------------------------
class Quantity(object):
"""
    Represents a floating point value together with its uncertainty (error).
"""
def __init__(self, value, error=None):
"""
        The constructor: store the value and its associated error.
"""
# Set the attributes
self.value = value
self.error = error
# -----------------------------------------------------------------
@property
def relative_error(self):
"""
This function ...
:return:
"""
return self.error / self.value
# -----------------------------------------------------------------
def __add__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value + quantity.value
error = math.sqrt(math.pow(self.error, 2) + math.pow(quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __sub__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value - quantity.value
error = math.sqrt(math.pow(self.error, 2) + math.pow(quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __mul__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value * quantity.value
error = math.sqrt(math.pow(quantity.value * self.error, 2) + math.pow(self.value * quantity.error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __div__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value / quantity.value
error = math.fabs(value) * math.sqrt(math.pow(self.relative_error, 2) + math.pow(quantity.relative_error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
def __truediv__(self, quantity):
"""
This function ...
:param quantity:
:return:
"""
value = self.value / quantity.value
error = math.fabs(value) * math.sqrt(math.pow(self.relative_error, 2) + math.pow(quantity.relative_error, 2))
return Quantity(value, error)
# -----------------------------------------------------------------
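# Example usage (sketch): standard propagation of independent errors.
#
#   a = Quantity(10.0, 0.5)
#   b = Quantity(4.0, 0.2)
#   c = a * b   # c.value = 40.0, c.error = sqrt((4*0.5)**2 + (10*0.2)**2)
#   d = a / b   # d.error = |d.value| * sqrt((0.5/10)**2 + (0.2/4)**2)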
| mit | 8,029,924,372,909,666,000 | 25.853448 | 117 | 0.402889 | false |
cloudwatt/contrail-controller | src/opserver/opserver.py | 1 | 79582 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from uveserver import UVEServer
import sys
import ConfigParser
import bottle
import json
import uuid
import argparse
import time
import redis
import base64
import socket
import struct
import errno
import copy
import datetime
import pycassa
from analytics_db import AnalyticsDb
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pysandesh.util import UTCTimestampUsec
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType,\
ConnectionStatus
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, COLLECTOR_DISCOVERY_SERVICE_NAME,\
ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME
from sandesh.viz.constants import _TABLES, _OBJECT_TABLES,\
_OBJECT_TABLE_SCHEMA, _OBJECT_TABLE_COLUMN_VALUES, \
_STAT_TABLES, STAT_OBJECTID_FIELD, STAT_VT_PREFIX, \
STAT_TIME_FIELD, STAT_TIMEBIN_FIELD, STAT_UUID_FIELD, \
STAT_SOURCE_FIELD, SOURCE, MODULE
from sandesh.viz.constants import *
from sandesh.analytics.ttypes import *
from sandesh.analytics.cpuinfo.ttypes import ProcessCpuInfo
from sandesh.discovery.ttypes import CollectorTrace
from opserver_util import OpServerUtils
from opserver_util import ServicePoller
from cpuinfo import CpuInfoData
from sandesh_req_impl import OpserverSandeshReqImpl
from sandesh.analytics_database.ttypes import *
from sandesh.analytics_database.constants import PurgeStatusString
from overlay_to_underlay_mapper import OverlayToUnderlayMapper, \
OverlayToUnderlayMapperError
_ERRORS = {
errno.EBADMSG: 400,
errno.ENOBUFS: 403,
errno.EINVAL: 404,
errno.ENOENT: 410,
errno.EIO: 500,
errno.EBUSY: 503
}
@bottle.error(400)
@bottle.error(403)
@bottle.error(404)
@bottle.error(410)
@bottle.error(500)
@bottle.error(503)
def opserver_error(err):
return err.body
#end opserver_error
class LinkObject(object):
def __init__(self, name, href):
self.name = name
self.href = href
# end __init__
# end class LinkObject
def obj_to_dict(obj):
# Non-null fields in object get converted to json fields
return dict((k, v) for k, v in obj.__dict__.iteritems())
# end obj_to_dict
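# The query helpers below talk to the query engine through Redis. A sketch of
# the key layout as used in this file (not an exhaustive protocol description):
#   QUERY:<qid>            - hash holding the query spec and metadata
#   QUERYQ                 - list acting as the work queue of pending query ids
#   REPLY:<qid>            - list carrying status/progress replies
#   RESULT:<qid>:<chunk>   - lists holding the result rows, one per chunk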
def redis_query_start(host, port, redis_password, qid, inp):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
for key, value in inp.items():
redish.hset("QUERY:" + qid, key, json.dumps(value))
query_metadata = {}
query_metadata['enqueue_time'] = OpServerUtils.utc_timestamp_usec()
redish.hset("QUERY:" + qid, 'query_metadata', json.dumps(query_metadata))
redish.hset("QUERY:" + qid, 'enqueue_time',
OpServerUtils.utc_timestamp_usec())
redish.lpush("QUERYQ", qid)
res = redish.blpop("REPLY:" + qid, 10)
if res is None:
return None
# Put the status back on the queue for the use of the status URI
redish.lpush("REPLY:" + qid, res[1])
resp = json.loads(res[1])
return int(resp["progress"])
# end redis_query_start
def redis_query_status(host, port, redis_password, qid):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
resp = {"progress": 0}
chunks = []
# For now, the number of chunks will be always 1
res = redish.lrange("REPLY:" + qid, -1, -1)
if not res:
return None
chunk_resp = json.loads(res[0])
ttl = redish.ttl("REPLY:" + qid)
if int(ttl) != -1:
chunk_resp["ttl"] = int(ttl)
query_time = redish.hmget("QUERY:" + qid, ["start_time", "end_time"])
chunk_resp["start_time"] = query_time[0]
chunk_resp["end_time"] = query_time[1]
if chunk_resp["progress"] == 100:
chunk_resp["href"] = "/analytics/query/%s/chunk-final/%d" % (qid, 0)
chunks.append(chunk_resp)
resp["progress"] = chunk_resp["progress"]
resp["chunks"] = chunks
return resp
# end redis_query_status
def redis_query_chunk_iter(host, port, redis_password, qid, chunk_id):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
iters = 0
fin = False
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
redish.persist("RESULT:" + qid + ":" + str(iters))
elems = redish.lrange("RESULT:" + qid + ":" + str(iters), 0, -1)
yield elems
if elems == []:
fin = True
else:
redish.delete("RESULT:" + qid + ":" + str(iters), 0, -1)
iters += 1
return
# end redis_query_chunk_iter
def redis_query_chunk(host, port, redis_password, qid, chunk_id):
res_iter = redis_query_chunk_iter(host, port, redis_password, qid, chunk_id)
dli = u''
starter = True
fin = False
yield u'{"value": ['
outcount = 0
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
elems = res_iter.next()
fin = True
for elem in elems:
fin = False
outcount += 1
if starter:
dli += '\n' + elem
starter = False
else:
dli += ', ' + elem
if not fin:
yield dli + '\n'
dli = u''
if outcount == 0:
yield '\n' + u']}'
else:
yield u']}'
return
# end redis_query_chunk
def redis_query_result(host, port, redis_password, qid):
try:
status = redis_query_status(host, port, redis_password, qid)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] result : Connection Error' % (qid),
server_addrs = ['%s:%d' % (host, port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] result : Exception: %s' % (qid, str(e)),
server_addrs = ['%s:%d' % (host, port)])
self._logger.error("Exception: %s" % e)
yield bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if status is None:
yield bottle.HTTPError(_ERRORS[errno.ENOENT],
                'Invalid query id (or) query result purged from DB')
            return
if status['progress'] == 100:
for chunk in status['chunks']:
chunk_id = int(chunk['href'].rsplit('/', 1)[1])
for gen in redis_query_chunk(host, port, redis_password, qid,
chunk_id):
yield gen
else:
yield {}
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
message = None,
status = ConnectionStatus.UP,
server_addrs = ['%s:%d' % (host, port)],
name = 'Query')
return
# end redis_query_result
def redis_query_result_dict(host, port, redis_password, qid):
stat = redis_query_status(host, port, redis_password, qid)
prg = int(stat["progress"])
res = []
if (prg < 0) or (prg == 100):
done = False
gen = redis_query_result(host, port, redis_password, qid)
result = u''
while not done:
try:
result += gen.next()
#import pdb; pdb.set_trace()
except StopIteration:
done = True
res = (json.loads(result))['value']
return prg, res
# end redis_query_result_dict
def redis_query_info(redish, qid):
query_data = {}
query_dict = redish.hgetall('QUERY:' + qid)
query_metadata = json.loads(query_dict['query_metadata'])
del query_dict['query_metadata']
query_data['query_id'] = qid
query_data['query'] = str(query_dict)
query_data['enqueue_time'] = query_metadata['enqueue_time']
return query_data
# end redis_query_info
class OpStateServer(object):
def __init__(self, logger, redis_password=None):
self._logger = logger
self._redis_list = []
self._redis_password= redis_password
# end __init__
def update_redis_list(self, redis_list):
self._redis_list = redis_list
# end update_redis_list
def redis_publish(self, msg_type, destination, msg):
# Get the sandesh encoded in XML format
sandesh = SandeshWriter.encode_sandesh(msg)
msg_encode = base64.b64encode(sandesh)
redis_msg = '{"type":"%s","destination":"%s","message":"%s"}' \
% (msg_type, destination, msg_encode)
# Publish message in the Redis bus
for redis_server in self._redis_list:
redis_inst = redis.StrictRedis(redis_server[0],
redis_server[1], db=0,
password=self._redis_password)
try:
redis_inst.publish('analytics', redis_msg)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'Connection Error',
server_addrs = ['%s:%d' % (redis_server[0], \
redis_server[1])])
                self._logger.error('No Connection to Redis [%s:%d]. '
'Failed to publish message.' \
% (redis_server[0], redis_server[1]))
return True
# end redis_publish
# end class OpStateServer
class OpServer(object):
"""
    This class provides a REST API to get the operational state of
    the Contrail VNS system.
The supported **GET** APIs are:
* ``/analytics/virtual-network/<name>``
* ``/analytics/virtual-machine/<name>``
* ``/analytics/vrouter/<name>``:
* ``/analytics/bgp-router/<name>``
* ``/analytics/bgp-peer/<name>``
* ``/analytics/xmpp-peer/<name>``
* ``/analytics/collector/<name>``
* ``/analytics/tables``:
* ``/analytics/table/<table>``:
* ``/analytics/table/<table>/schema``:
* ``/analytics/table/<table>/column-values``:
* ``/analytics/table/<table>/column-values/<column>``:
* ``/analytics/query/<queryId>``
* ``/analytics/query/<queryId>/chunk-final/<chunkId>``
* ``/analytics/send-tracebuffer/<source>/<module>/<name>``
* ``/analytics/operation/analytics-data-start-time``
The supported **POST** APIs are:
* ``/analytics/query``:
* ``/analytics/operation/database-purge``:
"""
def __new__(cls, *args, **kwargs):
obj = super(OpServer, cls).__new__(cls, *args, **kwargs)
bottle.route('/', 'GET', obj.homepage_http_get)
bottle.route('/analytics', 'GET', obj.analytics_http_get)
bottle.route('/analytics/uves', 'GET', obj.uves_http_get)
bottle.route('/analytics/alarms', 'GET', obj.alarms_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/service-chains', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/xmpp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/xmpp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/query', 'POST', obj.query_process)
bottle.route('/analytics/query/<queryId>', 'GET', obj.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', obj.query_chunk_get)
bottle.route('/analytics/queries', 'GET', obj.show_queries)
bottle.route('/analytics/tables', 'GET', obj.tables_process)
bottle.route('/analytics/operation/database-purge',
'POST', obj.process_purge_request)
bottle.route('/analytics/operation/analytics-data-start-time',
'GET', obj._get_analytics_data_start_time)
bottle.route('/analytics/table/<table>', 'GET', obj.table_process)
bottle.route(
'/analytics/table/<table>/schema', 'GET', obj.table_schema_process)
for i in range(0, len(_TABLES)):
if len(_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', obj.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', obj.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', obj.send_trace_buffer)
bottle.route('/documentation/<filename:path>', 'GET',
obj.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', obj.uve_http_post)
bottle.route(
'/analytics/alarms/' + uve + 's', 'GET', obj.alarm_list_http_get)
bottle.route(
'/analytics/alarms/' + uve + '/<name>', 'GET', obj.alarm_http_get)
bottle.route(
'/analytics/alarms/' + uve, 'POST', obj.alarm_http_post)
return obj
# end __new__
def disc_publish(self):
try:
import discoveryclient.client as client
except:
try:
# TODO: Try importing from the server. This should go away..
import discovery.client as client
except:
raise Exception('Could not get Discovery Client')
data = {
'ip-address': self._args.host_ip,
'port': self._args.rest_api_port,
}
self.disc = client.DiscoveryClient(
self._args.disc_server_ip,
self._args.disc_server_port,
ModuleNames[Module.OPSERVER])
self._logger.info("Disc Publish to %s : %d - %s"
% (self._args.disc_server_ip,
self._args.disc_server_port, str(data)))
self.disc.publish(ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME, data)
# end
def __init__(self):
self._args = None
self._parse_args()
self._homepage_links = []
self._homepage_links.append(
LinkObject('documentation', '/documentation/index.html'))
self._homepage_links.append(LinkObject('analytics', '/analytics'))
super(OpServer, self).__init__()
module = Module.OPSERVER
self._moduleid = ModuleNames[module]
node_type = Module2NodeType[module]
self._node_type_name = NodeTypeNames[node_type]
if self._args.worker_id:
self._instance_id = self._args.worker_id
else:
self._instance_id = INSTANCE_ID_DEFAULT
self._hostname = socket.gethostname()
if self._args.dup:
self._hostname += 'dup'
opserver_sandesh_req_impl = OpserverSandeshReqImpl(self)
sandesh_global.init_generator(self._moduleid, self._hostname,
self._node_type_name, self._instance_id,
self._args.collectors, 'opserver_context',
int(self._args.http_server_port),
['opserver.sandesh'])
sandesh_global.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
self._instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus)
# Trace buffer list
self.trace_buf = [
{'name':'DiscoveryMsg', 'size':1000}
]
# Create trace buffers
for buf in self.trace_buf:
sandesh_global.trace_buffer_create(name=buf['name'], size=buf['size'])
self._logger = sandesh_global._logger
self._get_common = self._http_get_common
self._put_common = self._http_put_common
self._delete_common = self._http_delete_common
self._post_common = self._http_post_common
self._collector_pool = None
self._state_server = OpStateServer(self._logger, self._args.redis_password)
self._uve_server = UVEServer(('127.0.0.1',
self._args.redis_server_port),
self._logger,
self._args.redis_password)
self._LEVEL_LIST = []
for k in SandeshLevel._VALUES_TO_NAMES:
if (k < SandeshLevel.UT_START):
d = {}
d[k] = SandeshLevel._VALUES_TO_NAMES[k]
self._LEVEL_LIST.append(d)
self._CATEGORY_MAP =\
dict((ModuleNames[k], [CategoryNames[ce] for ce in v])
for k, v in ModuleCategoryMap.iteritems())
self.disc = None
if self._args.disc_server_ip:
self.disc_publish()
else:
self.redis_uve_list = []
try:
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
for redis_uve in self._args.redis_uve_list:
redis_ip_port = redis_uve.split(':')
redis_ip_port = (redis_ip_port[0], int(redis_ip_port[1]))
self.redis_uve_list.append(redis_ip_port)
except Exception as e:
self._logger.error('Failed to parse redis_uve_list: %s' % e)
else:
self._state_server.update_redis_list(self.redis_uve_list)
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._analytics_links = ['uves', 'alarms', 'tables', 'queries']
self._VIRTUAL_TABLES = copy.deepcopy(_TABLES)
for t in _OBJECT_TABLES:
obj = query_table(
name=t, display_name=_OBJECT_TABLES[t].objtable_display_name,
schema=_OBJECT_TABLE_SCHEMA,
columnvalues=_OBJECT_TABLE_COLUMN_VALUES)
self._VIRTUAL_TABLES.append(obj)
for t in _STAT_TABLES:
stat_id = t.stat_type + "." + t.stat_attr
scols = []
keyln = stat_query_column(name=STAT_SOURCE_FIELD, datatype='string', index=True)
scols.append(keyln)
tln = stat_query_column(name=STAT_TIME_FIELD, datatype='int', index=False)
scols.append(tln)
tcln = stat_query_column(name="CLASS(" + STAT_TIME_FIELD + ")",
datatype='int', index=False)
scols.append(tcln)
teln = stat_query_column(name=STAT_TIMEBIN_FIELD, datatype='int', index=False)
scols.append(teln)
tecln = stat_query_column(name="CLASS(" + STAT_TIMEBIN_FIELD+ ")",
datatype='int', index=False)
scols.append(tecln)
uln = stat_query_column(name=STAT_UUID_FIELD, datatype='uuid', index=False)
scols.append(uln)
cln = stat_query_column(name="COUNT(" + t.stat_attr + ")",
datatype='int', index=False)
scols.append(cln)
isname = False
for aln in t.attributes:
if aln.name==STAT_OBJECTID_FIELD:
isname = True
scols.append(aln)
if aln.datatype in ['int','double']:
sln = stat_query_column(name= "SUM(" + aln.name + ")",
datatype=aln.datatype, index=False)
scols.append(sln)
scln = stat_query_column(name= "CLASS(" + aln.name + ")",
datatype=aln.datatype, index=False)
scols.append(scln)
if not isname:
keyln = stat_query_column(name=STAT_OBJECTID_FIELD, datatype='string', index=True)
scols.append(keyln)
sch = query_schema_type(type='STAT', columns=scols)
stt = query_table(
name = STAT_VT_PREFIX + "." + stat_id,
display_name = t.display_name,
schema = sch,
columnvalues = [STAT_OBJECTID_FIELD, SOURCE])
self._VIRTUAL_TABLES.append(stt)
self._analytics_db = AnalyticsDb(self._logger,
self._args.cassandra_server_list,
self._args.redis_query_port,
self._args.redis_password)
bottle.route('/', 'GET', self.homepage_http_get)
bottle.route('/analytics', 'GET', self.analytics_http_get)
bottle.route('/analytics/uves', 'GET', self.uves_http_get)
bottle.route('/analytics/alarms', 'GET', self.alarms_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-chains', 'GET', self.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', self.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', self.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', self.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', self.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/query', 'POST', self.query_process)
bottle.route(
'/analytics/query/<queryId>', 'GET', self.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', self.query_chunk_get)
bottle.route('/analytics/queries', 'GET', self.show_queries)
bottle.route('/analytics/tables', 'GET', self.tables_process)
bottle.route('/analytics/operation/database-purge',
'POST', self.process_purge_request)
bottle.route('/analytics/operation/analytics-data-start-time',
'GET', self._get_analytics_data_start_time)
bottle.route('/analytics/table/<table>', 'GET', self.table_process)
bottle.route('/analytics/table/<table>/schema',
'GET', self.table_schema_process)
for i in range(0, len(self._VIRTUAL_TABLES)):
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', self.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', self.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', self.send_trace_buffer)
bottle.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', self.uve_http_post)
bottle.route(
'/analytics/alarms/' + uve + 's', 'GET', self.alarm_list_http_get)
bottle.route(
'/analytics/alarms/' + uve + '/<name>', 'GET', self.alarm_http_get)
bottle.route(
'/analytics/alarms/' + uve, 'POST', self.alarm_http_post)
# end __init__
def _parse_args(self, args_str=' '.join(sys.argv[1:])):
'''
Eg. python opserver.py --host_ip 127.0.0.1
--redis_server_port 6379
--redis_query_port 6379
--redis_password
--collectors 127.0.0.1:8086
--cassandra_server_list 127.0.0.1:9160
--http_server_port 8090
--rest_api_port 8081
--rest_api_ip 0.0.0.0
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--worker_id 0
--redis_uve_list 127.0.0.1:6379
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'host_ip' : "127.0.0.1",
'collectors' : ['127.0.0.1:8086'],
'cassandra_server_list' : ['127.0.0.1:9160'],
'http_server_port' : 8090,
'rest_api_port' : 8081,
'rest_api_ip' : '0.0.0.0',
'log_local' : False,
'log_level' : 'SYS_DEBUG',
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'dup' : False,
'redis_uve_list' : ['127.0.0.1:6379']
}
redis_opts = {
'redis_server_port' : 6379,
'redis_query_port' : 6379,
'redis_password' : None,
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
# Override with CLI options
# Don't surpress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
defaults.update(redis_opts)
defaults.update(disc_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
help="Host IP address")
parser.add_argument("--redis_server_port",
type=int,
help="Redis server port")
parser.add_argument("--redis_query_port",
type=int,
help="Redis query port")
parser.add_argument("--redis_password",
help="Redis server password")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument("--http_server_port",
type=int,
help="HTTP server port")
parser.add_argument("--rest_api_port",
type=int,
help="REST API port")
parser.add_argument("--rest_api_ip",
help="REST API IP address")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--disc_server_ip",
help="Discovery Server IP address")
parser.add_argument("--disc_server_port",
type=int,
help="Discovery Server port")
parser.add_argument("--dup", action="store_true",
help="Internal use")
parser.add_argument("--redis_uve_list",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument(
"--worker_id",
help="Worker Id")
parser.add_argument("--cassandra_server_list",
help="List of cassandra_server_ip in ip:port format",
nargs="+")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
if type(self._args.cassandra_server_list) is str:
self._args.cassandra_server_list = self._args.cassandra_server_list.split()
# end _parse_args
def get_args(self):
return self._args
# end get_args
def get_http_server_port(self):
return int(self._args.http_server_port)
# end get_http_server_port
def get_uve_server(self):
return self._uve_server
# end get_uve_server
def homepage_http_get(self):
json_body = {}
json_links = []
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
for link in self._homepage_links:
json_links.append(
{'link': obj_to_dict(
LinkObject(link.name, base_url + link.href))})
json_body = \
{"href": base_url,
"links": json_links
}
return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
return bottle.static_file(
filename, root='/usr/share/doc/contrail-analytics-api/html')
# end documentation_http_get
def _http_get_common(self, request):
return (True, '')
# end _http_get_common
def _http_put_common(self, request, obj_dict):
return (True, '')
# end _http_put_common
def _http_delete_common(self, request, id):
return (True, '')
# end _http_delete_common
def _http_post_common(self, request, obj_dict):
return (True, '')
# end _http_post_common
@staticmethod
def _get_redis_query_ip_from_qid(qid):
try:
ip = qid.rsplit('-', 1)[1]
redis_ip = socket.inet_ntop(socket.AF_INET,
struct.pack('>I', int(ip, 16)))
except Exception as err:
return None
return redis_ip
# end _get_redis_query_ip_from_qid
def _query_status(self, request, qid):
resp = {}
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
resp = redis_query_status(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] status : Connection Error' % (qid),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] status : Exception %s' % (qid, str(e)),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
self._logger.error("Exception: %s" % e)
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if resp is None:
return bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id or Abandoned query id')
resp_header = {'Content-Type': 'application/json'}
resp_code = 200
self._logger.debug("query [%s] status: %s" % (qid, resp))
return bottle.HTTPResponse(
json.dumps(resp), resp_code, resp_header)
# end _query_status
def _query_chunk(self, request, qid, chunk_id):
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
                'Invalid query id')
            return
try:
done = False
gen = redis_query_chunk(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid, chunk_id=chunk_id)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query [%s] chunk #%d : Connection Error' % \
(qid, chunk_id),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query [%s] chunk #%d : Exception %s' % \
(qid, chunk_id, str(e)),
server_addrs = ['%s:%s' % (redis_query_ip, \
str(self._args.redis_query_port))])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.ENOENT], 'Error: %s' % e)
else:
self._logger.info(
"Query [%s] chunk #%d read at time %d"
% (qid, chunk_id, time.time()))
# end _query_chunk
def _query(self, request):
reply = {}
try:
redis_query_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
qid = str(uuid.uuid1(redis_query_ip))
self._logger.info("Starting Query %s" % qid)
tabl = ""
for key, value in request.json.iteritems():
if key == "table":
tabl = value
self._logger.info("Table is " + tabl)
tabn = None
for i in range(0, len(self._VIRTUAL_TABLES)):
if self._VIRTUAL_TABLES[i].name == tabl:
tabn = i
if (tabn is not None):
tabtypes = {}
for cols in self._VIRTUAL_TABLES[tabn].schema.columns:
if cols.datatype in ['long', 'int']:
tabtypes[cols.name] = 'int'
elif cols.datatype in ['ipv4']:
tabtypes[cols.name] = 'ipv4'
else:
tabtypes[cols.name] = 'string'
self._logger.info(str(tabtypes))
if (tabn is None):
if not tabl.startswith("StatTable."):
reply = bottle.HTTPError(_ERRORS[errno.ENOENT],
'Table %s not found' % tabl)
yield reply
return
else:
self._logger.info("Schema not known for dynamic table %s" % tabl)
if tabl == OVERLAY_TO_UNDERLAY_FLOW_MAP:
overlay_to_underlay_map = OverlayToUnderlayMapper(
request.json, self._args.host_ip,
self._args.rest_api_port, self._logger)
try:
yield overlay_to_underlay_map.process_query()
except OverlayToUnderlayMapperError as e:
yield bottle.HTTPError(_ERRORS[errno.EIO], str(e))
return
prg = redis_query_start('127.0.0.1',
int(self._args.redis_query_port),
self._args.redis_password,
qid, request.json)
if prg is None:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Query Engine not responding' % qid,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error('QE Not Responding')
yield bottle.HTTPError(_ERRORS[errno.EBUSY],
'Query Engine is not responding')
return
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Connection Error' % (qid),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Query[%s] Exception: %s' % (qid, str(e)),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
redish = None
if prg < 0:
cod = -prg
self._logger.error(
"Query Failed. Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
else:
self._logger.info(
"Query Accepted at time %d , Progress %d"
% (time.time(), prg))
# In Async mode, we should return with "202 Accepted" here
# and also give back the status URI "/analytic/query/<qid>"
# OpServers's client will poll the status URI
if request.get_header('Expect') == '202-accepted' or\
request.get_header('Postman-Expect') == '202-accepted':
href = '/analytics/query/%s' % (qid)
resp_data = json.dumps({'href': href})
yield bottle.HTTPResponse(
resp_data, 202, {'Content-type': 'application/json'})
else:
for gen in self._sync_query(request, qid):
yield gen
# end _query
def _sync_query(self, request, qid):
# In Sync mode, Keep polling query status until final result is
# available
try:
self._logger.info("Polling %s for query result" % ("REPLY:" + qid))
prg = 0
done = False
while not done:
gevent.sleep(1)
resp = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
# We want to print progress only if it has changed
if int(resp["progress"]) == prg:
continue
self._logger.info(
"Query Progress is %s time %d" % (str(resp), time.time()))
prg = int(resp["progress"])
# Either there was an error, or the query is complete
if (prg < 0) or (prg == 100):
done = True
if prg < 0:
cod = -prg
self._logger.error("Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
return
# In Sync mode, its time to read the final result. Status is in
# "resp"
done = False
gen = redis_query_result(host='127.0.0.1',
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
'''
final_res = {}
prg, final_res['value'] =\
redis_query_result_dict(host=self._args.redis_server_ip,
port=int(self._args.redis_query_port),
qid=qid)
yield json.dumps(final_res)
'''
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Sync Query[%s] Connection Error' % qid,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Sync Query[%s] Exception: %s' % (qid, str(e)),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.UP,
message = None,
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.info(
"Query Result available at time %d" % time.time())
return
# end _sync_query
def query_process(self):
self._post_common(bottle.request, None)
result = self._query(bottle.request)
return result
# end query_process
def query_status_get(self, queryId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_status(bottle.request, queryId)
# end query_status_get
def query_chunk_get(self, queryId, chunkId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_chunk(bottle.request, queryId, int(chunkId))
# end query_chunk_get
def show_queries(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
queries = {}
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=int(self._args.redis_query_port),
password=self._args.redis_password)
pending_queries = redish.lrange('QUERYQ', 0, -1)
pending_queries_info = []
for query_id in pending_queries:
query_data = redis_query_info(redish, query_id)
pending_queries_info.append(query_data)
queries['pending_queries'] = pending_queries_info
processing_queries = redish.lrange(
'ENGINE:' + socket.gethostname(), 0, -1)
processing_queries_info = []
abandoned_queries_info = []
error_queries_info = []
for query_id in processing_queries:
status = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=query_id)
query_data = redis_query_info(redish, query_id)
if status is None:
abandoned_queries_info.append(query_data)
elif status['progress'] < 0:
query_data['error_code'] = status['progress']
error_queries_info.append(query_data)
else:
query_data['progress'] = status['progress']
processing_queries_info.append(query_data)
queries['queries_being_processed'] = processing_queries_info
queries['abandoned_queries'] = abandoned_queries_info
queries['error_queries'] = error_queries_info
except redis.exceptions.ConnectionError:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Show queries : Connection Error',
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as err:
# Update connection info
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'Query', status = ConnectionStatus.DOWN,
message = 'Show queries : Exception %s' % str(err),
server_addrs = ['127.0.0.1' + ':' +
str(self._args.redis_query_port)])
self._logger.error("Exception in show queries: %s" % str(err))
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % err)
else:
return json.dumps(queries)
# end show_queries
@staticmethod
def _get_tfilter(cfilt):
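        # Build a type filter dict from "Type:attr" strings, e.g.
        # ['TypeA:attr1', 'TypeA:attr2', 'TypeB'] -> {'TypeA': {'attr1', 'attr2'}, 'TypeB': set()}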
tfilter = {}
for tfilt in cfilt:
afilt = tfilt.split(':')
try:
attr_list = tfilter[afilt[0]]
except KeyError:
tfilter[afilt[0]] = set()
attr_list = tfilter[afilt[0]]
finally:
if len(afilt) > 1:
attr_list.add(afilt[1])
tfilter[afilt[0]] = attr_list
return tfilter
# end _get_tfilter
@staticmethod
def _uve_filter_set(req):
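        # Extract kfilt/sfilt/mfilt/cfilt from a GET request's query parameters;
        # also reports whether any filter was supplied at all.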
sfilter = None
mfilter = None
tfilter = None
kfilter = None
any_filter = False
if 'sfilt' in req.keys():
any_filter = True
sfilter = req.sfilt
if 'mfilt' in req.keys():
any_filter = True
mfilter = req.mfilt
if 'cfilt' in req.keys():
any_filter = True
infos = req.cfilt.split(',')
tfilter = OpServer._get_tfilter(infos)
if 'kfilt' in req.keys():
any_filter = True
kfilter = req.kfilt.split(',')
return any_filter, kfilter, sfilter, mfilter, tfilter
# end _uve_filter_set
@staticmethod
def _uve_http_post_filter_set(req):
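        # Extract the same filters from a POSTed JSON body; kfilt defaults to ['*']
        # when not given, and malformed kfilt/cfilt values raise ValueError.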
try:
kfilter = req['kfilt']
if not isinstance(kfilter, list):
raise ValueError('Invalid kfilt')
except KeyError:
kfilter = ['*']
try:
sfilter = req['sfilt']
except KeyError:
sfilter = None
try:
mfilter = req['mfilt']
except KeyError:
mfilter = None
try:
cfilt = req['cfilt']
if not isinstance(cfilt, list):
raise ValueError('Invalid cfilt')
except KeyError:
tfilter = None
else:
tfilter = OpServer._get_tfilter(cfilt)
return True, kfilter, sfilter, mfilter, tfilter
# end _uve_http_post_filter_set
def _uve_alarm_http_post(self, is_alarm):
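        # Streams the UVEs (or alarms, when is_alarm is True) matching the POSTed
        # filters back to the client as a single JSON document.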
(ok, result) = self._post_common(bottle.request, None)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 1)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid table name')
else:
try:
req = bottle.request.json
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_http_post_filter_set(req)
except Exception as err:
yield bottle.HTTPError(_ERRORS[errno.EBADMSG], err)
bottle.response.set_header('Content-Type', 'application/json')
yield u'{"value": ['
first = True
for key in kfilter:
if key.find('*') != -1:
uve_name = uve_tbl + ':*'
for gen in self._uve_server.multi_uve_get(uve_name, True,
kfilter, sfilter,
mfilter, tfilter,
is_alarm):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
return
first = True
for key in kfilter:
uve_name = uve_tbl + ':' + key
rsp = self._uve_server.get_uve(uve_name, True, sfilter,
mfilter, tfilter,
is_alarm=is_alarm)
if rsp != {}:
data = {'name': key, 'value': rsp}
if first:
yield u'' + json.dumps(data)
first = False
else:
yield u', ' + json.dumps(data)
yield u']}'
# end _uve_alarm_http_post
def uve_http_post(self):
return self._uve_alarm_http_post(is_alarm=False)
# end uve_http_post
def alarm_http_post(self):
return self._uve_alarm_http_post(is_alarm=True)
# end alarm_http_post
def _uve_alarm_http_get(self, name, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 2)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield {}
else:
bottle.response.set_header('Content-Type', 'application/json')
uve_name = uve_tbl + ':' + name
req = bottle.request.query
flat = False
if 'flat' in req.keys():
flat = True
any_filter, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
if any_filter:
flat = True
uve_name = uve_tbl + ':' + name
if name.find('*') != -1:
flat = True
yield u'{"value": ['
first = True
for gen in self._uve_server.multi_uve_get(uve_name, flat,
kfilter, sfilter,
mfilter, tfilter,
is_alarm):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
else:
rsp = self._uve_server.get_uve(uve_name, flat, sfilter,
mfilter, tfilter,
is_alarm=is_alarm)
yield json.dumps(rsp)
# end _uve_alarm_http_get
def uve_http_get(self, name):
return self._uve_alarm_http_get(name, is_alarm=False)
# end uve_http_get
def alarm_http_get(self, name):
return self._uve_alarm_http_get(name, is_alarm=True)
# end alarm_http_get
def _uve_alarm_list_http_get(self, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
arg_line = bottle.request.url.rsplit('/', 1)[1]
uve_args = arg_line.split('?')
uve_type = uve_args[0][:-1]
if len(uve_args) != 1:
uve_filters = ''
filters = uve_args[1].split('&')
filters = \
[filt for filt in filters if filt[:len('kfilt')] != 'kfilt']
if len(filters):
uve_filters = '&'.join(filters)
else:
uve_filters = 'flat'
else:
uve_filters = 'flat'
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
return {}
else:
bottle.response.set_header('Content-Type', 'application/json')
req = bottle.request.query
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
uve_list = self._uve_server.get_uve_list(
uve_tbl, kfilter, sfilter, mfilter, tfilter, True, is_alarm)
uve_or_alarm = 'alarms' if is_alarm else 'uves'
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/%s/%s/' % (uve_or_alarm, uve_type)
uve_links =\
[obj_to_dict(LinkObject(uve,
base_url + uve + "?" + uve_filters))
for uve in uve_list]
return json.dumps(uve_links)
# end _uve_alarm_list_http_get
def uve_list_http_get(self):
return self._uve_alarm_list_http_get(is_alarm=False)
# end uve_list_http_get
def alarm_list_http_get(self):
return self._uve_alarm_list_http_get(is_alarm=True)
# end alarm_list_http_get
def analytics_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/'
analytics_links = [obj_to_dict(LinkObject(link, base_url + link))
for link in self._analytics_links]
return json.dumps(analytics_links)
# end analytics_http_get
def _uves_alarms_http_get(self, is_alarm):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
uve_or_alarm = 'alarms' if is_alarm else 'uves'
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/%s/' % (uve_or_alarm)
uvetype_links =\
[obj_to_dict(
LinkObject(uvetype + 's', base_url + uvetype + 's'))
for uvetype in UVE_MAP]
return json.dumps(uvetype_links)
# end _uves_alarms_http_get
def uves_http_get(self):
return self._uves_alarms_http_get(is_alarm=False)
# end uves_http_get
def alarms_http_get(self):
return self._uves_alarms_http_get(is_alarm=True)
# end alarms_http_get
def send_trace_buffer(self, source, module, instance_id, name):
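        # Publish a 'send-tracebuffer' request over Redis to the targeted generator
        # so it emits the named trace buffer.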
response = {}
trace_req = SandeshTraceRequest(name)
if module not in ModuleIds:
response['status'] = 'fail'
response['error'] = 'Invalid module'
return json.dumps(response)
module_id = ModuleIds[module]
node_type = Module2NodeType[module_id]
node_type_name = NodeTypeNames[node_type]
if self._state_server.redis_publish(msg_type='send-tracebuffer',
destination=source + ':' +
node_type_name + ':' + module +
':' + instance_id,
msg=trace_req):
response['status'] = 'pass'
else:
response['status'] = 'fail'
response['error'] = 'No connection to Redis'
return json.dumps(response)
# end send_trace_buffer
def tables_process(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
link = LinkObject(self._VIRTUAL_TABLES[
i].name, base_url + self._VIRTUAL_TABLES[i].name)
tbl_info = obj_to_dict(link)
tbl_info['type'] = self._VIRTUAL_TABLES[i].schema.type
if (self._VIRTUAL_TABLES[i].display_name is not None):
tbl_info['display_name'] =\
self._VIRTUAL_TABLES[i].display_name
json_links.append(tbl_info)
return json.dumps(json_links)
# end tables_process
def process_purge_request(self):
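        # The POSTed purge_input is either a percentage (1-100) of the collected
        # time range or a time string; data older than the resulting time is purged.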
self._post_common(bottle.request, None)
if ("application/json" not in bottle.request.headers['Content-Type']):
self._logger.error('Content-type is not JSON')
response = {
'status': 'failed', 'reason': 'Content-type is not JSON'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
analytics_start_time = self._analytics_db._get_analytics_start_time()
if (analytics_start_time == None):
self._logger.info("Failed to get the analytics start time")
response = {'status': 'failed',
'reason': 'Failed to get the analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
purge_input = None
if ("purge_input" in bottle.request.json.keys()):
value = bottle.request.json["purge_input"]
if (type(value) is int):
if ((value <= 100) and (value > 0)):
current_time = UTCTimestampUsec()
purge_input = analytics_start_time + (float((value)*
(float(current_time) - float(analytics_start_time))))/100
else:
response = {'status': 'failed',
'reason': 'Valid % range is [1, 100]'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
elif (type(value) is unicode):
try:
purge_input = OpServerUtils.convert_to_utc_timestamp_usec(value)
except:
response = {'status': 'failed',
'reason': 'Valid time formats are: \'%Y %b %d %H:%M:%S.%f\', '
'\'now\', \'now-h/m/s\', \'-/h/m/s\' in purge_input'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'Valid purge_input format is % or time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'purge_input not specified'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
if (purge_input <= analytics_start_time):
response = {'status': 'failed',
'reason': 'purge input is less than analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
res = self._analytics_db.get_analytics_db_purge_status(
self._state_server._redis_list)
if (res == None):
purge_request_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
purge_id = str(uuid.uuid1(purge_request_ip))
resp = self._analytics_db.set_analytics_db_purge_status(purge_id,
purge_input)
if (resp == None):
gevent.spawn(self.db_purge_operation, purge_input, purge_id)
response = {'status': 'started', 'purge_id': purge_id}
return bottle.HTTPResponse(json.dumps(response), 200,
{'Content-type': 'application/json'})
elif (resp['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(resp), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
elif (res['status'] == 'running'):
return bottle.HTTPResponse(json.dumps(res), 200,
{'Content-type': 'application/json'})
elif (res['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(res), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
# end process_purge_request
def db_purge_operation(self, purge_input, purge_id):
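        # Runs in a greenlet: performs the purge, clears the purge status in the DB,
        # and sends DatabasePurge stats describing the outcome.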
self._logger.info("purge_id %s START Purging!" % str(purge_id))
purge_stat = DatabasePurgeStats()
purge_stat.request_time = UTCTimestampUsec()
purge_info = DatabasePurgeInfo()
self._analytics_db.number_of_purge_requests += 1
purge_info.number_of_purge_requests = \
self._analytics_db.number_of_purge_requests
total_rows_deleted = self._analytics_db.db_purge(purge_input, purge_id)
self._analytics_db.delete_db_purge_status()
end_time = UTCTimestampUsec()
duration = end_time - purge_stat.request_time
purge_stat.purge_id = purge_id
if (total_rows_deleted < 0):
purge_stat.purge_status = PurgeStatusString[PurgeStatus.FAILURE]
self._logger.error("purge_id %s purging Failed" % str(purge_id))
else:
purge_stat.purge_status = PurgeStatusString[PurgeStatus.SUCCESS]
self._logger.info("purge_id %s purging DONE" % str(purge_id))
purge_stat.rows_deleted = total_rows_deleted
purge_stat.duration = duration
purge_info.name = self._hostname
purge_info.stats = [purge_stat]
purge_data = DatabasePurge(data=purge_info)
purge_data.send()
#end db_purge_operation
def _get_analytics_data_start_time(self):
analytics_start_time = self._analytics_db._get_analytics_start_time()
response = {'analytics_data_start_time': analytics_start_time}
return bottle.HTTPResponse(
json.dumps(response), 200, {'Content-type': 'application/json'})
# end _get_analytics_data_start_time
def table_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/' + table + '/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
link = LinkObject('schema', base_url + 'schema')
json_links.append(obj_to_dict(link))
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
link = LinkObject(
'column-values', base_url + 'column-values')
json_links.append(obj_to_dict(link))
break
return json.dumps(json_links)
# end table_process
def table_schema_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
return json.dumps(self._VIRTUAL_TABLES[i].schema,
default=lambda obj: obj.__dict__)
return (json.dumps({}))
# end table_schema_process
def column_values_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/table/' + table + '/column-values/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
for col in self._VIRTUAL_TABLES[i].columnvalues:
link = LinkObject(col, base_url + col)
json_links.append(obj_to_dict(link))
break
return (json.dumps(json_links))
# end column_values_process
def generator_info(self, table, column):
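        # Resolve possible values for a column: Source/ModuleId from the Redis
        # NGENERATORS set, Category/Level from static maps, ObjectId from the UVE list.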
if ((column == MODULE) or (column == SOURCE)):
sources = []
moduleids = []
for redis_uve in self.redis_uve_list:
redish = redis.StrictRedis(
db=1,
host=redis_uve[0],
port=redis_uve[1],
password=self._args.redis_password)
try:
for key in redish.smembers("NGENERATORS"):
source = key.split(':')[0]
module = key.split(':')[2]
if (sources.count(source) == 0):
sources.append(source)
if (moduleids.count(module) == 0):
moduleids.append(module)
except Exception as e:
self._logger.error('Exception: %s' % e)
if column == MODULE:
return moduleids
elif column == SOURCE:
return sources
elif (column == 'Category'):
return self._CATEGORY_MAP
elif (column == 'Level'):
return self._LEVEL_LIST
elif (column == STAT_OBJECTID_FIELD):
objtab = None
for t in _STAT_TABLES:
stat_table = STAT_VT_PREFIX + "." + \
t.stat_type + "." + t.stat_attr
if (table == stat_table):
objtab = t.obj_table
break
if (objtab != None) and (objtab != "None"):
#import pdb; pdb.set_trace()
return list(self._uve_server.get_uve_list(objtab,
None, None, None, None, False))
return []
# end generator_info
def column_process(self, table, column):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
if self._VIRTUAL_TABLES[i].columnvalues.count(column) > 0:
return (json.dumps(self.generator_info(table, column)))
return (json.dumps([]))
# end column_process
def start_uve_server(self):
self._uve_server.run()
#end start_uve_server
def start_webserver(self):
pipe_start_app = bottle.app()
try:
bottle.run(app=pipe_start_app, host=self._args.rest_api_ip,
port=self._args.rest_api_port, server='gevent')
except Exception as e:
self._logger.error("Exception: %s" % e)
sys.exit()
# end start_webserver
def cpu_info_logger(self):
opserver_cpu_info = CpuInfoData()
while True:
mod_cpu_info = ModuleCpuInfo()
mod_cpu_info.module_id = self._moduleid
mod_cpu_info.instance_id = self._instance_id
mod_cpu_info.cpu_info = opserver_cpu_info.get_cpu_info(
system=False)
mod_cpu_state = ModuleCpuState()
mod_cpu_state.name = self._hostname
# At some point, the following attributes will be deprecated in favor of cpu_info
mod_cpu_state.module_cpu_info = [mod_cpu_info]
mod_cpu_state.opserver_cpu_share = mod_cpu_info.cpu_info.cpu_share
mod_cpu_state.opserver_mem_virt =\
mod_cpu_info.cpu_info.meminfo.virt
opserver_cpu_state_trace = ModuleCpuStateTrace(data=mod_cpu_state)
opserver_cpu_state_trace.send()
aly_cpu_state = AnalyticsCpuState()
aly_cpu_state.name = self._hostname
aly_cpu_info = ProcessCpuInfo()
aly_cpu_info.module_id= self._moduleid
aly_cpu_info.inst_id = self._instance_id
aly_cpu_info.cpu_share = mod_cpu_info.cpu_info.cpu_share
aly_cpu_info.mem_virt = mod_cpu_info.cpu_info.meminfo.virt
aly_cpu_state.cpu_info = [aly_cpu_info]
aly_cpu_state_trace = AnalyticsCpuStateTrace(data=aly_cpu_state)
aly_cpu_state_trace.send()
gevent.sleep(60)
#end cpu_info_logger
def poll_collector_list(self):
'''
Analytics node may be brought up/down any time. For UVE aggregation,
Opserver needs to know the list of all Analytics nodes (redis-uves).
Presently, Discovery server supports only pull mechanism to get the
Publisher list. Periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery.
** Remove this code when the push mechanism to update the discovery clients
on the addition/deletion of Publisher nodes for a given service is
supported by the Discovery server.
'''
if self.disc:
while True:
self.redis_uve_list = []
try:
sub_obj = \
self.disc.subscribe(COLLECTOR_DISCOVERY_SERVICE_NAME, 0)
collectors = sub_obj.info
except Exception as e:
self._logger.error('Failed to get collector-list from ' \
'discovery server')
else:
if collectors:
disc_trace = CollectorTrace()
disc_trace.collectors = []
for collector in collectors:
self.redis_uve_list.append((collector['ip-address'],
self._args.redis_server_port))
disc_trace.collectors.append(collector['ip-address'])
disc_trace.trace_msg(name='DiscoveryMsg')
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._state_server.update_redis_list(self.redis_uve_list)
if self.redis_uve_list:
gevent.sleep(60)
else:
gevent.sleep(5)
# end poll_collector_list
def disc_cb(self, clist):
'''
Analytics node may be brought up/down any time. For UVE aggregation,
Opserver needs to know the list of all Analytics nodes (redis-uves).
Periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery.
'''
newlist = []
for elem in clist:
(ipaddr,port) = elem
newlist.append((ipaddr, self._args.redis_server_port))
self._uve_server.update_redis_uve_list(newlist)
self._state_server.update_redis_list(newlist)
def main():
opserver = OpServer()
gevs = [
gevent.spawn(opserver.start_webserver),
gevent.spawn(opserver.cpu_info_logger),
gevent.spawn(opserver.start_uve_server)]
if opserver.disc:
sp = ServicePoller(opserver._logger, CollectorTrace, opserver.disc, \
COLLECTOR_DISCOVERY_SERVICE_NAME, opserver.disc_cb)
sp.start()
gevs.append(sp)
gevent.joinall(gevs)
if __name__ == '__main__':
main()
| apache-2.0 | -6,446,021,524,382,836,000 | 40.362786 | 98 | 0.526124 | false |
borevitzlab/Gigavision | libs/IPCamera.py | 1 | 21632 | import datetime
import logging.config
import os
import re
import time
import numpy
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from xml.etree import ElementTree
from PIL import Image
from io import BytesIO
try:
logging.config.fileConfig("logging.ini")
except:
pass
exiv2_exists = False
try:
import pyexiv2
exiv2_exists = True
except Exception as e:
logging.debug("Couldnt import pyexiv2: {}".format(str(e)))
class IPCamera(object):
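    """Generic IP camera driven by a configurable set of HTTP command URLs."""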
def __init__(self, identifier=None, config=None, **kwargs):
if not config:
config = dict()
self.config = config.copy()
self.return_parser = config.get("return_parser", "plaintext")
e = os.environ.get("RETURN_PARSER", None)
e = os.environ.get("CAMERA_RETURN_PARSER", e)
self.return_parser = e if e is not None else self.return_parser
self.logger = logging.getLogger(identifier)
self.identifier = identifier
self.camera_name = config.get("camera_name", identifier)
self.interval = int(config.get("interval", 300))
self.current_capture_time = datetime.datetime.now()
self._image = None
self._notified = []
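        # Request URL template; can be overridden with the FORMAT_URL/CAMERA_FORMAT_URL
        # environment variables below.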
format_str = config.get("format_url", "http://{HTTP_login}@{ip}{command}")
e = os.environ.get("FORMAT_URL", None)
e = os.environ.get("CAMERA_FORMAT_URL", e)
format_str = e if e is not None else format_str
self.auth_type = config.get("auth_type", "basic")
e = os.environ.get("AUTH_TYPE", None)
e = os.environ.get("CAMERA_AUTH_TYPE", e)
self.auth_type = e if e is not None else self.auth_type
self.auth_object = None
username = config.get("username", "admin")
e = os.environ.get("AUTH_USERNAME", None)
e = os.environ.get("CAMERA_AUTH_USERNAME", e)
username = e if e is not None else username
password = config.get("password", "admin")
e = os.environ.get("AUTH_PASSWORD", None)
e = os.environ.get("CAMERA_AUTH_PASSWORD", e)
        password = e if e is not None else password
if format_str.startswith("http://{HTTP_login}@"):
format_str = format_str.replace("{HTTP_login}@", "")
self.auth_object = HTTPBasicAuth(username, password)
self.auth_object_digest = HTTPDigestAuth(username, password)
self.auth_object = self.auth_object_digest if self.auth_type == "digest" else self.auth_object
self._HTTP_login = config.get("HTTP_login", "{user}:{password}").format(
user=username,
password=password)
ip = config.get("ip", "192.168.1.101:81")
ip = os.environ.get("IP", ip)
ip = os.environ.get("CAMERA_IP", ip)
self._url = format_str.format(
ip=ip,
HTTP_login=self._HTTP_login,
command="{command}")
self._image_size = config.get("image_size", [1920, 1080])
self._image_size = os.environ.get("CAMERA_IMAGE_SIZE", self._image_size)
if type(self._image_size) is str:
            self._image_size = re.split(r"[\W+|\||,|x|x|:]", self._image_size)
self._image_size = [ int(float(x)) for x in self._image_size ]
self.image_quality = config.get("image_quality", 100)
self.image_quality = os.environ.get("CAMERA_IMAGE_QUALITY", self.image_quality)
# no autofocus modes by default.
self._autofocus_modes = config.get("autofocus_modes", [])
self._hfov_list = config.get("horizontal_fov_list",
[71.664, 58.269, 47.670, 40.981, 33.177, 25.246, 18.126, 12.782, 9.217, 7.050,
5.82])
self._vfov_list = config.get("vertical_fov_list",
[39.469, 33.601, 26.508, 22.227, 16.750, 13.002, 10.324, 7.7136, 4.787, 3.729,
2.448])
self._hfov = self._vfov = None
self._zoom_list = config.get("zoom_list", [50, 150, 250, 350, 450, 550, 650, 750, 850, 950, 1000])
self._focus_range = config.get("focus_range", [1, 99999])
# set commands from the rest of the config.
self.command_urls = config.get('urls', {})
self.return_keys = config.get("keys", {})
self.logger.info(self.status)
def _make_request(self, command_string, *args, **kwargs):
"""
Makes a generic request formatting the command string and applying the authentication.
:param command_string: command string like read stream raw
:type command_string: str
:param args:
:param kwargs:
:return:
"""
url = self._url.format(*args, command=command_string, **kwargs)
if "&" in url and "?" not in url:
url = url.replace("&", "?", 1)
response = None
try:
response = requests.get(url, timeout=60, auth=self.auth_object)
if response.status_code == 401:
self.logger.debug("Auth is not basic, trying digest")
response = requests.get(url, timeout=60, auth=self.auth_object_digest)
if response.status_code not in [200, 204]:
self.logger.error(
"[{}] - {}\n{}".format(str(response.status_code), str(response.reason), str(response.url)))
return
return response
except Exception as e:
self.logger.error("Some exception got raised {}".format(str(e)))
return
def _read_stream(self, command_string, *args, **kwargs):
"""
opens a url with the current HTTP_login string
:type command_string: str
:param command_string: url to go to with parameters
:return: string of data returned from the camera
"""
response = self._make_request(command_string, *args, **kwargs)
if response is None:
return
return response.text
def _read_stream_raw(self, command_string, *args, **kwargs):
"""
opens a url with the current HTTP_login string
:param command_string: url to go to with parameters
:type command_string: str
:return: string of data returned from the camera
"""
response = self._make_request(command_string, *args, **kwargs)
if response is None:
return
return response.content
def _get_cmd(self, cmd):
cmd_str = self.command_urls.get(cmd, None)
if not cmd_str and cmd_str not in self._notified:
print("No command available for \"{}\"".format(cmd))
self._notified.append(cmd_str)
return None, None
keys = self.return_keys.get(cmd, [])
if type(keys) not in (list, tuple):
keys = [keys]
return cmd_str, keys
@staticmethod
def get_value_from_xml(message_xml, *args):
"""
        gets float, int or string values from an xml string where the key is the tag of the first element with value as
        text.
        :param message_xml: the xml to search in.
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message_xml):
return return_values
# apparently, there is an issue parsing when the ptz returns INVALID XML (WTF?)
# these seem to be the tags that get mutilated.
illegal = ['\n', '\t', '\r',
"<CPStatusMsg>", "</CPStatusMsg>", "<Text>",
"</Text>", "<Type>Info</Type>", "<Type>Info",
"Info</Type>", "</Type>", "<Type>"]
for ill in illegal:
message_xml = message_xml.replace(ill, "")
root_element = ElementTree.Element("invalidation_tag")
try:
root_element = ElementTree.fromstring(message_xml)
except Exception as e:
print(str(e))
print("Couldnt parse XML!!!")
print(message_xml)
            return_values = dict()
for key in args:
target_ele = root_element.find(key)
if target_ele is None:
continue
value = target_ele.text.replace(' ', '')
if value is None:
continue
types = [float, int, str]
for t in types:
try:
return_values[key] = t(value)
break
except ValueError:
pass
else:
print("Couldnt cast an xml element text attribute to str. What are you feeding the xml parser?")
return return_values
@staticmethod
def get_value_from_plaintext(message, *args):
"""
        gets float, int or string values from a plaintext message consisting of
        "key=value" lines.
:param message:
:param args: list of keys to find values for.
:rtype: dict
:return: dict of arg: value pairs requested
"""
return_values = dict()
if not len(args):
return return_values
if not len(message):
return return_values
for line in message.split("\n"):
line = line.replace("= ", "=").replace(" =", "=").strip()
name, value = line.partition("=")[::2]
name, value = name.strip(), value.strip()
types = [float, int, str]
if name in args:
for t in types:
try:
v = t(value)
if str(v).lower() in ['yes', 'no', 'true', 'false', 'on', 'off']:
v = str(v).lower() in ['yes', 'true', 'on']
return_values[name] = v
break
except ValueError:
pass
else:
print("Couldnt cast an plaintext element text attribute to str. What are you feeding the parser?")
return return_values
def get_value_from_stream(self, stream, *keys):
"""
        Gets a value from some text data (xml, or plaintext "key=value" lines)
returns a dict of "key":value pairs.
:param stream: text data to search for values
:type stream: str
:param keys:
:type keys: list
:return: dict of values
:rtype: dict
"""
if self.return_parser == 'plaintext':
return self.get_value_from_plaintext(stream, *keys)
elif self.return_parser == 'xml':
return self.get_value_from_xml(stream, *keys)
else:
return dict()
def encode_write_image(self, img: Image, fn: str) -> list:
"""
        takes a PIL image and writes it to disk as tiff and/or jpg
        (the output types can be overridden with the OUTPUT_TYPES environment variable)
        also tries to add exif data to the images
        :param PIL.Image img: image to write
:param str fn: filename
:return: files successfully written.
:rtype: list(str)
"""
# output types must be valid!
fnp = os.path.splitext(fn)[0]
successes = list()
output_types = ["jpg", "tiff"]
e = os.environ.get("OUTPUT_TYPES", None)
if e is not None:
            output_types = re.split(r"[\W+|\||,|:]", e)
for ext in output_types:
fn = "{}.{}".format(fnp, ext)
s = False
try:
if ext in ("tiff", "tif"):
if fn.endswith(".tiff"):
fn = fn[:-1]
img.save(fn, format="TIFF", compression='tiff_lzw')
if ext in ("jpeg", "jpg"):
img.save(fn, format="JPEG", quality=95, optimize=True, progressive=True, subsampling="4:4:4")
else:
img.save(fn)
s = True
except Exception as e:
self.logger.error("Couldnt write image")
self.logger.error(e)
# im = Image.fromarray(np.uint8(img))
# s = cv2.imwrite(fn, img)
if s:
successes.append(fn)
try:
# set exif data
if exiv2_exists:
meta = pyexiv2.ImageMetadata(fn)
meta.read()
for k, v in self.exif.items():
try:
meta[k] = v
except:
pass
meta.write()
except Exception as e:
self.logger.debug("Couldnt write the appropriate metadata: {}".format(str(e)))
return successes
def capture_image(self, filename=None) -> numpy.array:
"""
        Captures an image with the IP camera, uses requests.get to acquire the image.
        :param filename: filename without extension to capture to.
        :return: list of filenames (of captured images) if filename was specified, otherwise the captured image.
        :rtype: PIL.Image.Image or list
"""
st = time.time()
cmd, keys = self._get_cmd("get_image")
if "{width}" in cmd and "{height}" in cmd:
cmd = cmd.format(width=self._image_size[0], height=self.image_size[1])
if not cmd:
self.logger.error("No capture command, this is wrong...")
return self._image
url = self._url.format(command=cmd)
for x in range(10):
try:
# fast method
a = self._read_stream_raw(cmd)
# b = numpy.fromstring(a, numpy.uint8)
self._image = Image.open(BytesIO(a))
if filename:
rfiles = self.encode_write_image(self._image, filename)
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
return rfiles
else:
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
break
except Exception as e:
self.logger.error("Capture from network camera failed {}".format(str(e)))
time.sleep(0.2)
else:
self.logger.error("All capture attempts (10) for network camera failed.")
return self._image
# def set_fov_from_zoom(self):
# self._hfov = numpy.interp(self._zoom_position, self.zoom_list, self.hfov_list)
# self._vfov = numpy.interp(self._zoom_position, self.zoom_list, self.vfov_list)
@property
def image_quality(self) -> float:
"""
Image quality as a percentage.
:getter: cached.
:setter: to camera.
:rtype: float
"""
return self._image_quality
@image_quality.setter
def image_quality(self, value: float):
assert (1 <= value <= 100)
cmd, keys = self._get_cmd("get_image_quality")
if cmd:
self._read_stream(cmd.format(value))
@property
def image_size(self) -> list:
"""
Image resolution in pixels, tuple of (width, height)
:getter: from camera.
:setter: to camera.
:rtype: tuple
"""
cmd, keys = self._get_cmd("get_image_size")
if cmd:
stream = self._read_stream(cmd)
            output = self.get_value_from_stream(stream, *keys)
width,height = self._image_size
for k,v in output.items():
if "width" in k:
width = v
if "height" in k:
height = v
self._image_size = [width, height]
return self._image_size
@image_size.setter
def image_size(self, value):
assert type(value) in (list, tuple), "image size is not a list or tuple!"
assert len(value) == 2, "image size doesnt have 2 elements width,height are required"
value = list(value)
cmd, keys = self._get_cmd("set_image_size")
if cmd:
self._read_stream(cmd.format(width=value[0], height=value[1]))
self._image_size = value
@property
def focus_mode(self) -> str:
"""
TODO: this is broken, returns the dict of key: value not value
Focus Mode
When setting, the mode provided must be in 'focus_modes'
:getter: from camera.
:setter: to camera.
:rtype: list
"""
cmd, keys = self._get_cmd("get_focus_mode")
if not cmd:
return None
stream_output = self._read_stream(cmd)
        return self.get_value_from_stream(stream_output, *keys)['mode']
@focus_mode.setter
def focus_mode(self, mode: str):
assert (self._autofocus_modes is not None)
if str(mode).upper() not in [x.upper() for x in self._autofocus_modes]:
print("Focus mode not in list of supported focus modes, not setting.")
return
cmd, keys = self._get_cmd("set_focus_mode")
if cmd:
self._read_stream(cmd.format(mode=mode))
@property
def focus_position(self):
"""
Focal position as an absolute value.
:getter: from camera.
:setter: to camera.
:rtype: float
"""
cmd, keys = self._get_cmd("get_focus")
if not cmd:
return None
stream_output = self._read_stream(cmd)
        result = self.get_value_from_stream(stream_output, *keys)
        return next(iter(result.values()), float(99999))
@focus_position.setter
def focus_position(self, absolute_position):
self.logger.debug("Setting focus position to {}".format(absolute_position))
cmd, key = self._get_cmd("set_focus")
        if not cmd:
            return
        assert (self._focus_range is not None and absolute_position is not None)
        absolute_position = min(self._focus_range[1], max(self._focus_range[0], absolute_position))
        assert (self._focus_range[0] <= absolute_position <= self._focus_range[1])
        self._read_stream(cmd.format(focus=absolute_position))
def focus(self):
"""
focuses the camera by cycling it through its autofocus modes.
"""
self.logger.debug("Focusing...")
tempfocus = self.focus_mode
cmd, key = self._get_cmd("set_autofocus_mode")
if not cmd or len(self._autofocus_modes) < 1:
return
for mode in self._autofocus_modes:
self.focus_mode = mode
time.sleep(2)
self.focus_mode = tempfocus
self._read_stream(cmd.format(mode=self._autofocus_modes[0]))
time.sleep(2)
self.logger.debug("Focus complete.")
@property
def focus_range(self):
"""
Information about the focus of the camera
:return: focus type, focus max, focus min
:rtype: list [str, float, float]
"""
cmd, keys = self._get_cmd("get_focus_range")
if not cmd:
return None
stream_output = self._read_stream(cmd)
values = self.get_value_from_stream(stream_output, keys)
return values[2:0:-1]
@property
def hfov_list(self):
"""
List of horizontal FoV values according to focus list.
:getter: cached.
:setter: cache.
        :rtype: list(float)
"""
return self._hfov_list
@hfov_list.setter
def hfov_list(self, value):
assert type(value) in (list, tuple), "must be either list or tuple"
# assert len(value) == len(self._zoom_list), "must be the same length as zoom list"
self._hfov_list = list(value)
@property
def vfov_list(self):
"""
List of vertical FoV values according to focus list.
:getter: cached.
:setter: cache.
        :rtype: list(float)
"""
return self._vfov_list
@vfov_list.setter
def vfov_list(self, value):
assert type(value) in (list, tuple), "must be either list or tuple"
# assert len(value) == len(self._zoom_list), "must be the same length as zoom list"
self._vfov_list = list(value)
@property
def hfov(self):
"""
Horizontal FoV
:getter: calculated using cached zoom_position, zoom_list and hfov_list.
:setter: cache.
        :rtype: float
"""
# self._hfov = numpy.interp(self._zoom_position, self.zoom_list, self.hfov_list)
return self._hfov
@hfov.setter
def hfov(self, value: float):
self._hfov = value
@property
def vfov(self):
"""
Vertical FoV
:getter: calculated using cached zoom_position, zoom_list and vfov_list.
:setter: cache.
        :rtype: float
"""
# self._vfov = numpy.interp(self._zoom_position, self.zoom_list, self.vfov_list)
return self._vfov
@vfov.setter
def vfov(self, value: float):
self._vfov = value
@property
def status(self) -> str:
"""
Helper property for a string of the current zoom/focus status.
:return: informative string of zoom_pos zoom_range focus_pos focus_range
:rtype: str
"""
# fmt_string = "zoom_pos:\t{}\nzoom_range:\t{}"
fmt_string = "".join(("\nfocus_pos:\t{}\nfocus_range:\t{}"))
return fmt_string.format(self.focus_position, self.focus_range)
| mit | 2,764,234,713,448,507,000 | 34.873964 | 120 | 0.546644 | false |
emijrp/pywikibot-core | tests/namespace_tests.py | 1 | 11867 | # -*- coding: utf-8 -*-
"""Tests for the Namespace class."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
from collections import Iterable
from pywikibot.site import Namespace
from tests.aspects import unittest, TestCase, AutoDeprecationTestCase
import sys
if sys.version_info[0] > 2:
basestring = (str, )
unicode = str
# Default namespaces which should work in any MW wiki
_base_builtin_ns = {
'Media': -2,
'Special': -1,
'': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Project': 4,
'Project talk': 5,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
}
image_builtin_ns = dict(_base_builtin_ns)
image_builtin_ns['Image'] = 6
image_builtin_ns['Image talk'] = 7
file_builtin_ns = dict(_base_builtin_ns)
file_builtin_ns['File'] = 6
file_builtin_ns['File talk'] = 7
builtin_ns = dict(list(image_builtin_ns.items()) + list(file_builtin_ns.items()))
class TestNamespaceObject(TestCase):
"""Test cases for Namespace class."""
net = False
def testNamespaceTypes(self):
"""Test cases for methods manipulating namespace names."""
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertIsInstance(ns, dict)
self.assertTrue(all(x in ns for x in range(0, 16)))
self.assertTrue(all(isinstance(key, int)
for key in ns))
self.assertTrue(all(isinstance(val, Iterable)
for val in ns.values()))
self.assertTrue(all(isinstance(name, basestring)
for val in ns.values()
for name in val))
# Use a namespace object as a dict key
self.assertEqual(ns[ns[6]], ns[6])
def testNamespaceConstructor(self):
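        """Test the Namespace constructor."""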
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(y.id, 6)
self.assertEqual(y.custom_name, u'dummy')
self.assertEqual(y.canonical_name, u'File')
self.assertNotEqual(y.custom_name, u'Dummy')
self.assertNotEqual(y.canonical_name, u'file')
self.assertIn(u'Image', y.aliases)
self.assertIn(u'Immagine', y.aliases)
self.assertEqual(len(y), 4)
self.assertEqual(list(y), ['dummy', u'File', u'Image', u'Immagine'])
self.assertEqual(y.case, u'first-letter')
def testNamespaceNameCase(self):
"""Namespace names are always case-insensitive."""
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertIn(u'dummy', y)
self.assertIn(u'Dummy', y)
self.assertIn(u'file', y)
self.assertIn(u'File', y)
self.assertIn(u'image', y)
self.assertIn(u'Image', y)
self.assertIn(u'immagine', y)
self.assertIn(u'Immagine', y)
def testNamespaceToString(self):
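        """Test Namespace string representations."""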
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertEqual(str(ns[0]), ':')
self.assertEqual(str(ns[1]), 'Talk:')
self.assertEqual(str(ns[6]), ':File:')
self.assertEqual(unicode(ns[0]), u':')
self.assertEqual(unicode(ns[1]), u'Talk:')
self.assertEqual(unicode(ns[6]), u':File:')
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(str(y), ':File:')
if sys.version_info[0] <= 2:
self.assertEqual(unicode(y), u':ملف:')
self.assertEqual(y.canonical_prefix(), ':File:')
self.assertEqual(y.custom_prefix(), u':ملف:')
def testNamespaceCompare(self):
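        """Test Namespace comparison operators."""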
a = Namespace(id=0, canonical_name=u'')
self.assertEqual(a, 0)
self.assertEqual(a, '')
self.assertNotEqual(a, None)
x = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
z = Namespace(id=7, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
self.assertEqual(x, x)
self.assertEqual(x, y)
self.assertNotEqual(x, a)
self.assertNotEqual(x, z)
self.assertEqual(x, 6)
self.assertEqual(x, u'dummy')
self.assertEqual(x, u'Dummy')
self.assertEqual(x, u'file')
self.assertEqual(x, u'File')
self.assertEqual(x, u':File')
self.assertEqual(x, u':File:')
self.assertEqual(x, u'File:')
self.assertEqual(x, u'image')
self.assertEqual(x, u'Image')
self.assertEqual(y, u'ملف')
self.assertLess(a, x)
self.assertGreater(x, a)
self.assertGreater(z, x)
self.assertIn(6, [x, y, z])
self.assertNotIn(8, [x, y, z])
def testNamespaceNormalizeName(self):
"""Test Namespace.normalize_name."""
self.assertEqual(Namespace.normalize_name(u'File'), u'File')
self.assertEqual(Namespace.normalize_name(u':File'), u'File')
self.assertEqual(Namespace.normalize_name(u'File:'), u'File')
self.assertEqual(Namespace.normalize_name(u':File:'), u'File')
self.assertEqual(Namespace.normalize_name(u''), u'')
self.assertEqual(Namespace.normalize_name(u':'), False)
self.assertEqual(Namespace.normalize_name(u'::'), False)
self.assertEqual(Namespace.normalize_name(u':::'), False)
self.assertEqual(Namespace.normalize_name(u':File::'), False)
self.assertEqual(Namespace.normalize_name(u'::File:'), False)
self.assertEqual(Namespace.normalize_name(u'::File::'), False)
def test_repr(self):
"""Test Namespace.__repr__."""
a = Namespace(id=0, canonical_name=u'Foo')
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[])" \
% (unicode('Foo'), unicode('Foo'))
self.assertEqual(s, r)
a.defaultcontentmodel = 'bar'
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], defaultcontentmodel=%r)" \
% (unicode('Foo'), unicode('Foo'), unicode('bar'))
self.assertEqual(s, r)
a.case = 'upper'
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], case=%r, defaultcontentmodel=%r)" \
% (unicode('Foo'), unicode('Foo'), unicode('upper'), unicode('bar'))
self.assertEqual(s, r)
b = eval(repr(a))
self.assertEqual(a, b)
class TestNamespaceDictDeprecated(AutoDeprecationTestCase):
"""Test static/classmethods in Namespace replaced by NamespacesDict."""
net = False
def test_resolve(self):
"""Test Namespace.resolve."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
main_ns = namespaces[0]
file_ns = namespaces[6]
special_ns = namespaces[-1]
self.assertEqual(Namespace.resolve([6]), [file_ns])
self.assertEqual(Namespace.resolve(['File']), [file_ns])
self.assertEqual(Namespace.resolve(['6']), [file_ns])
self.assertEqual(Namespace.resolve([file_ns]), [file_ns])
self.assertEqual(Namespace.resolve([file_ns, special_ns]),
[file_ns, special_ns])
self.assertEqual(Namespace.resolve([file_ns, file_ns]),
[file_ns, file_ns])
self.assertEqual(Namespace.resolve(6), [file_ns])
self.assertEqual(Namespace.resolve('File'), [file_ns])
self.assertEqual(Namespace.resolve('6'), [file_ns])
self.assertEqual(Namespace.resolve(file_ns), [file_ns])
self.assertEqual(Namespace.resolve(0), [main_ns])
self.assertEqual(Namespace.resolve('0'), [main_ns])
self.assertEqual(Namespace.resolve(-1), [special_ns])
self.assertEqual(Namespace.resolve('-1'), [special_ns])
self.assertEqual(Namespace.resolve('File:'), [file_ns])
self.assertEqual(Namespace.resolve(':File'), [file_ns])
self.assertEqual(Namespace.resolve(':File:'), [file_ns])
self.assertEqual(Namespace.resolve('Image:'), [file_ns])
self.assertEqual(Namespace.resolve(':Image'), [file_ns])
self.assertEqual(Namespace.resolve(':Image:'), [file_ns])
self.assertRaises(TypeError, Namespace.resolve, [True])
self.assertRaises(TypeError, Namespace.resolve, [False])
self.assertRaises(TypeError, Namespace.resolve, [None])
self.assertRaises(TypeError, Namespace.resolve, True)
self.assertRaises(TypeError, Namespace.resolve, False)
self.assertRaises(TypeError, Namespace.resolve, None)
self.assertRaises(KeyError, Namespace.resolve, -10)
self.assertRaises(KeyError, Namespace.resolve, '-10')
self.assertRaises(KeyError, Namespace.resolve, 'foo')
self.assertRaises(KeyError, Namespace.resolve, ['foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10',
Namespace.resolve, [-10, 0])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: foo',
Namespace.resolve, [0, 'foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10,-11',
Namespace.resolve, [-10, 0, -11])
def test_lookup_name(self):
"""Test Namespace.lookup_name."""
file_nses = Namespace.builtin_namespaces(use_image_name=False)
image_nses = Namespace.builtin_namespaces(use_image_name=True)
for name, ns_id in builtin_ns.items():
file_ns = Namespace.lookup_name(name, file_nses)
self.assertIsInstance(file_ns, Namespace)
image_ns = Namespace.lookup_name(name, image_nses)
self.assertIsInstance(image_ns, Namespace)
with self.disable_assert_capture():
self.assertEqual(file_ns.id, ns_id)
self.assertEqual(image_ns.id, ns_id)
class TestNamespaceCollections(TestCase):
"""Test how Namespace interact when in collections."""
net = False
def test_set(self):
"""Test converting sequence of Namespace to a set."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
self.assertTrue(all(isinstance(x, int) for x in namespaces))
self.assertTrue(all(isinstance(x, int) for x in namespaces.keys()))
self.assertTrue(all(isinstance(x, Namespace)
for x in namespaces.values()))
namespaces_set = set(namespaces)
self.assertEqual(len(namespaces), len(namespaces_set))
self.assertTrue(all(isinstance(x, int) for x in namespaces_set))
def test_set_minus(self):
"""Test performing set minus operation on set of Namespace objects."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
excluded_namespaces = set([-1, -2])
positive_namespaces = set(namespaces) - excluded_namespaces
self.assertEqual(len(namespaces),
len(positive_namespaces) + len(excluded_namespaces))
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| mit | 8,047,064,952,597,964,000 | 35.922118 | 111 | 0.599477 | false |
veloutin/papas | apmanager/accesspoints/cmdurls.py | 1 | 1647 | # PAPAS Access Point Administration System
# Copyright (c) 2010 Revolution Linux inc. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
urlpatterns = patterns ('apmanager.accesspoints',
(r'new/$', 'views.apcommands.create_new_command' ),
(r'new/ap/(?P<ap_id>\d+)/cmd/(?P<command_id>\d+)/$', 'views.apcommands.edit_new_command' , {'group_id':None} ),
(r'new/cmd/(?P<command_id>\d+)/ap/(?P<ap_id>\d+)/$', 'views.apcommands.edit_new_command' , {'group_id':None} ),
(r'new/group/(?P<group_id>\d+)/cmd/(?P<command_id>\d+)/$', 'views.apcommands.edit_new_command' , {'ap_id':None} ),
(r'new/cmd/(?P<command_id>\d+)/group/(?P<group_id>\d+)/$', 'views.apcommands.edit_new_command' , {'ap_id':None} ),
(r'view/(?P<command_id>\d+)/$','views.apcommands.view_command'),
(r'viewexec/(?P<exec_id>\d+)/$','views.apcommands.view_exec'),
(r'all/?$', 'views.apcommands.view_home', dict(view_all=True) ),
(r'$', 'views.apcommands.view_home' ),
)
| agpl-3.0 | -6,984,247,115,467,998,000 | 42.342105 | 118 | 0.679417 | false |
MoroGasper/client | client/plugins/hoster/http.py | 1 | 26525 | # -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import gevent
from gevent.lock import Semaphore
from contextlib import closing
import base64
from ... import core, hoster, account as account_module, event, logger
from ...scheme import Column, transaction
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
log = logger.get('http')
@event.register('account.domain:changed')
def _(e, account, old):
if re.match(r'^w:', account.domain):
account._domain = hoster.wildcard(account.domain)
elif re.match(r'^r:', account.domain):
account._domain = hoster.regexp(account.domain)
else:
account._domain = re.compile(re.quote(account.domain))
class Account(account_module.Profile, account_module.HttpAccount):
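    """Generic HTTP account/profile with optional cookies, headers and basic/digest auth."""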
scheme = Column('api', read_only=False)
# options
auth_method = Column('api', read_only=False)
cookies = Column('api', read_only=False)
headers = Column('api', read_only=False)
def __init__(self, **kwargs):
account_module.Profile.__init__(self, **kwargs)
account_module.HttpAccount.__init__(self, **kwargs)
if not self.cookies:
self.cookies = {}
if not self.headers:
self.headers = {}
def get_login_data(self):
data = account_module.Profile.get_login_data(self)
data.update(dict(auth_method=self.auth_method, cookies=self.cookies, headers=self.headers))
return data
def match(self, file):
if self.scheme is not None and self.scheme != file.split_url.scheme:
return False
if not account_module.Profile.match(self, file):
return False
return True
def _http_request(self, func, *args, **kwargs):
self._http_request_prepare(kwargs)
if self.cookies:
if 'cookies' not in kwargs:
kwargs['cookies'] = dict()
kwargs['cookies'].update(self.cookies)
if self.headers:
if 'headers' not in kwargs:
kwargs['headers'] = dict()
kwargs['headers'].update(self.headers)
if self.auth_method and (self.username or self.password):
if self.auth_method == 'basic':
kwargs['auth'] = HTTPBasicAuth(self.username, self.password)
elif self.auth_method == 'digest':
kwargs['auth'] = HTTPDigestAuth(self.username, self.password)
else:
self.fatal('unknown auth method: {}'.format(self.auth_method))
return func(*args, **kwargs)
def on_initialize(self):
pass
@hoster.host
class this:
model = hoster.HttpHoster
account_model = Account
name = "http"
priority = 150
patterns = [
hoster.Matcher('https?')
]
config = [
hoster.cfg('domains', dict(), dict),
hoster.cfg('send_crawl_domains', False, bool, description='Report domain names that have no plugin')
]
_crawl_mime_types = 'text/.*'
_download_mime_types = '.*/.*'
input_lock = Semaphore()
def load_icon(hostname):
return base64.b64decode(_http_default_icon)
def get_hostname(file):
return file.split_url.host
def on_check(file):
# check if we have a multi hoster account for this file
acc = hoster.get_multihoster_account('check', multi_match, file)
if acc:
oldacc = file.account
try:
file.log.info('trying multihoster {}, on_check of {}'.format(acc.name, file.url))
acc.hoster.get_download_context(file, acc)
return acc.hoster.on_check(file)
except gevent.GreenletExit:
raise
except BaseException as e:
log.exception(e)
file.account = oldacc
# default check code
with closing(file.account.get(file.url, referer=file.referer, stream=True)) as resp:
if resp.status_code in (301, 302, 303, 307):
return [hoster.urljoin(file.url, resp.headers['Location'])]
hoster.http_response_errors(file, resp)
content_type = None
if 'Content-Type' in resp.headers:
content_type = re.sub('; .*$', '', resp.headers['Content-Type'])
content_length = None
if 'Content-Length' in resp.headers:
content_length = int(resp.headers['Content-Length'])
content_disposition = None
if resp.headers.get('Content-Disposition', None) not in (None, 'attachment'):
content_disposition = resp.headers['Content-Disposition']
if content_disposition or (content_length and content_length > hoster.MB(2)): # or 'accept-ranges' in resp.headers:
return _on_check_download(file, resp, content_type, content_length, content_disposition)
if content_type:
if re.match(_crawl_mime_types, content_type):
return _on_check_crawl(file, resp, content_type, content_length, content_disposition)
elif re.match(_download_mime_types, content_type):
return _on_check_download(file, resp, content_type, content_length, content_disposition)
file.delete_after_greenlet()
def _on_check_download(file, resp, content_type, content_length, content_disposition):
if content_disposition:
name = hoster.contentdisposition.parse(content_disposition)
else:
path = hoster.urlsplit(file.url).path
name = os.path.basename(path)
file.set_infos(name, size=content_length)
def _on_check_crawl(file, resp, content_type, content_length, content_disposition):
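    # Parse the fetched page, collect its links and add those handled by known
    # hosters (or media-looking links, after asking the user) as new downloads.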
    # TODO: ask if file should be parsed or downloaded
if False:
return _on_check_download(file, resp, content_type, content_length, content_disposition)
# send domain to backend
if this.config.send_crawl_domains:
domain = file.split_url.host
log.send('info', 'unknown domain: {}'.format(domain))
# prase data
data = resp.text
data = data.replace('\\/', '/') # lazy method to get also json encoded links
links = hoster.collect_links(data)
def _collect(tag, attr):
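        # collect absolute URLs from the given tag/attribute pair (e.g. <a href>, <img src>)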
for i in soup.select(tag):
url = i.get(attr)
if url:
url = hoster.urljoin(file.url, url)
if not url in links:
links.add(url)
try:
soup = BeautifulSoup(data)
_collect('a', 'href')
_collect('img', 'src')
title = soup.select('title')
if title:
title = title[0].text
except UnicodeEncodeError as e:
file.log.warning('error: {}'.format(e))
title = file.url
# filter links
hoster_links = []
anonymous_links = []
if not links:
return file.no_download_link()
for url in links:
try:
host = hoster.find(url, {'ftp', 'http', 'torrent'})
except ValueError:
continue
link = {'url': url, 'referer': file.url}
if host:
link['host'], link['pmatch'] = host
hoster_links.append(link)
#elif re.search(r'\.(jpe?g|gif|png|avi|flv|mkv|rar|zip|vob|srt|sub|mp3|mp4|ogg|opus)$', url):
elif re.search(r'\.(avi|flv|mkv|rar|zip|vob|srt|sub|mp3|mp4|ogg|opus)$', url):
anonymous_links.append(link)
if hoster_links:
core.add_links(hoster_links)
elif anonymous_links:
hostname = file.split_url.host
with input_lock:
if hostname in this.config.domains:
add = this.config.domains[hostname]
else:
remember, add = file.input_remember_button(['Found #{num} links on #{domain}. Do you want to add them?', dict(num=len(anonymous_links), domain=hostname)])
if add is None:
add = False
elif remember:
with transaction:
this.config.domains[hostname] = add
if add:
core.add_links(anonymous_links, package_name=title)
file.delete_after_greenlet()
def get_download_context(file):
# check if we have a multi hoster account for this file
acc = hoster.get_multihoster_account('download', multi_match, file)
if acc:
# context already set
return
else:
# default http download
file.set_download_context(
account=this.get_account('download', file),
download_func=on_download,
download_next_func=this.on_download_next)
def on_download(chunk):
return chunk.file.url
def multi_match(acc, hostname):
for host in acc.compatible_hosts:
if host.match(hostname):
return True
return False
_http_default_icon = """
iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ
bWFnZVJlYWR5ccllPAAAA2ZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdp
bj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6
eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0
NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJo
dHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlw
dGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEu
MC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVz
b3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1N
Ok9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDpDOUFGQzg0RTk4NDhFMjExOTk5OUYzRjU5RTY2
REU0MSIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxMkJGMThDMDRBOTYxMUUyQUREMzk3MTM0
MjU4QjYzNyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxMkJGMThCRjRBOTYxMUUyQUREMzk3
MTM0MjU4QjYzNyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M2IChXaW5kb3dz
KSI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOkQ4RjUxNDg4
OTI0QUUyMTFCOTM4QUIwMzc5MzZFQTExIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOkM5QUZD
ODRFOTg0OEUyMTE5OTk5RjNGNTlFNjZERTQxIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpS
REY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+zznWBAAALhFJREFUeNrsfQd8HFed
/5uZrdquVe+yJFu2JduJu9NNQhqpJoEkcJcLhDsfJJcQLglcAe4DgeN/pAB/4IADjlASSIFUSEIq
xMHEJU5c4m5Zve6utH135t535o00Gu2uVtKuLCl5yfPujnanvO/v/dr7/X6Pu/r75JS2Z83783Fa
D+2LaV9CezPti2gvpd3LupV2A+0O9v1h2hO0h2kfYL2H9qO0H6D9XdoP0j5EFlgzLIBnKKf9TNo3
0X46ALca+BKv1US8ViMpLDASl9lIrEaBmA20CzwROJ5wHCFGgZNPEE2IDiJRCpAkTySRrIgmkyQU
S5LhaIIMhuNkMBKTX8MJsZcRxE7aX6f9T7R3vU8As9vMtJ9D+0W0X+wwGZprXBZS5bCQYpuZuCjw
VgqyqPmBpL5K2mOSfBzHjPT76mBYKKHI37Op3xo7CSWOEn80XjIQjJ3dNRK5rX04QkZiSRDEs7T/
nvZXQE/zaTC5eSICjLSfT/u1tF/Z5Clw13sKSJ27gLgspjGAGcpSFuBPPKb/njTh2Ljz0g8cPUIJ
gnQEwqTNHybHfGEf/dNvaX+Y9j/SHn+fAGZOAFW0v7q40Fa/tMhG6gvtxMTzyuzVAz2L4I/jDuwL
cVEkJykhHBkcIUd94U569Ge0/4j2I+8TwPQIwEX7a5vrvK1rKz0KW08FyCkGf+L3JBKJJ8nRoSD5
88khHH6G9m/S/tJcIwB+DnOnAtDHmdWeeQW+rFvQDyaqcDYXO8nNq2u5CxuKL61xWl6kf36T9ssw
8d4ngMxNoP1Xm6rcG8+o8c4r8Nn/oy1JP1Q6C8j5DaXkksbi1Q0e6xP08F9o/+D7BJBGLNH+7fWV
rsvPri2a1+CznzF9RSIlDis5u66YXLioeG2dy/oHZj0sfp8Axrd71le4tp5XX0ySCwR8/TXKnFZy
3qJisqnaA1P2bdq/xpxT73kC2LqhwnU32H5SWnjga7+H52v0Osj1rZWmVaWOu+mhPbSf914ggCVM
xuvbFrD+llIXEeCYWcDgj11XIgI1aVvL3GRzvbeR+Q7ugz9qIRIAHG2gdPjckzoTcDPtv7y+pVLw
FJjeM+Crn9ErqKJ4w4oqbmWp4zb6cTvtyxYSAVQyV+l21SmiAb+V9seubi4zVbqs7znwx1+DIy2l
brKxytPKLIWP5N3cWvqhvIO/kTlA7qX9cR348PK9dHFjcclSajO/t8Ef+73baiKVdrNJ4MiHB8Jx
Oz34ou4n84YDfJTd/E9Z14IPL98zm+u8Va1U7mMQJFEincNh4o/ESUKU3pPgq+crLDCT0yo8pKXY
/jl65FHClqfyIZfzZcv/K+1fpv152r+gAx+LO4/By7em0iM/8P6+AHnqUG+Usb4ycAergS9wmAzE
aTYQm0kgeF9AX+1G+pl2K31vNQjy1RYS+OoLx3GktdyN1yve7h0GFwW/7p3rBABw/8fj8n58yD9w
kr7/mKr0adoDGyrdm2Hu4Um3tQ+SV9sG2+jxD9P+V/VL4YToCSdiFb2hGHSIciYyQBzV0JuYblG6
ocojrAMhLSDwRxVEkWqDJdQy4ri1u3sCWG7GqmjHXCUAODN+43K6L6XgJxn4fbrZf9PqcufWc+qL
5Cd8+Xgf+Uun/0UmLvp05xtifW+Ga3rfaB/qXF3hNvEct6DA115jcbEDQSzNu7oDr9EDH6D92Fwj
AMj0J5xOx9nstr9C+6s68FfR/h3IfJFS9ts9foD/XXrs1hRcItt21apS57wCH3c6HIuTrkCYnAxE
5ONNXpu8ZpDpGk1eEAFXv7PL/wJRgmLa5woByAodBX8TT1VX35D/TUYAeqXvkUuaSqzFBWb5QCwp
e/oDMwAf7WNLiuxzHnx87g9GleARCnogmoAL+Gnan4I11jUSfWh1ebK8qciR9hoYrYZCO+510a5u
mQjOSsE1Z50AnAr49k2mAoH0d/ugyN3IHD7a9mOq9DW0UHNPdX60ljjJi8cHPknf/gdRAjKn2mpp
P7vMblFcx3MIfMzySCJJuocjpJ2CftQXjjBzGIAjPuC47lnW7ujyPxJLiBuWl7nkc6S6hkwEXjt9
XnHJnp7hJ5kTLXSqCAA860nMfCM1UuJBeUZ/XZXZmtn/t6eXOq4+o1pZ2lUfBmvmVB8o2tEVgK7w
w+nM/k1VHm4uge8LR0knneEIEaPKawcD+2lmDWUCCt/d/Hbf8C9EIl3VSolAXQ/RX0OkNw1xkBSl
9Xv7Rn5FD109XS46EwKQ1+wh8402OihxnvgDgUOMALTgQ1u/T5b7OkDweUWpk1AC+PQ0CeCGpkJ7
TsDHjEU08NHBIDnuC5FSm5lUu6yyPZ4OfAUMkc7yqDzL9/eP4JF2YFKwmb57ig4ccMFrKKjfpj/a
ugzOMVW3SSFamikHpZ8v39cf/Ab9eMdsE8CPnE7n5UabKN9jJCTP/s+C8+m+94Pzags9pZRNiyk8
fIVWM6l3W1ce84XPpUdensL111TYzUudFuNohO90wMe99wyHybv9I+TAQLCfHnoQAFLlbP2bXX7Y
3RuWeu1ClctCyhwWYuB5EoolqCyPUAUuRE74I8gpeI7Ncsz2nhlyVczkf9zXNxKkN/y5pSUaIpCk
ceOHj4spkUQS4mepiIH/5NezRQB3OZ2uGwG+RO9XjBhJMBR4gVG9dvZft6LEcem6ysJx/Env4VtJ
uQAlgNumSAAfW0qVP2maAZxRKp8PDY6Q15WYPazCIXgTEb1qWPdLjJsV7R8YuYh2EMNFbrPB5Ysm
DjPA0WGbx/LgT/nnfQpHubOFigN4RlM7ujhS4y6AjvFj+uEd2vflmwAQ0/ZVgxXgJwjPUcVvMIBb
+bwOfPgEvg5wk5P49hfJbLwHA1xDe1uWzqbrFhXapgX+u/3D5OUTA53MPf1jkjlqF1zh56wbKPjV
ubLBs5lolAhgFmxdBhEqpXaFIxdidbnLRpVI6APrU3DhnK0FIM3qQbfXIRBeUfLFqLy0D5n3pu67
t51R5a4pc1gnXdhBP6/WixPdksU9wP7ddXqZswSZPlNl+zA9KfgwPS+k/V/I1EK2E7MIvtpuoTL+
sf09gQniS/uEVa4CQjniClUHywcBwHj/lcvtcAmmJJOfPAmH5NyHb6ZSEgUWv58JfPWBlkPWEfLJ
DIse5WwWvnRpU8nyTTXeUTk4FZmPLKBzar1O5qT6AJn7DYN9w97+kb8cGRgZ96DaRTC8a1L8IXCq
nZcPAvgGVfpWGQrEsaFNGkk4HNqtevx0bXfPSDQr8FVg6Kx207fXpxBTt9N+4Mxqzw23rq/n6jy2
aYGvcpvFdKAubCjG4sGzjOjmegNL//BbPYGOzkB4Avhjz8aRDdQshuJNsowxzJYAELx4i9mOC4qj
sz+uaP7fVb+kS/LY2RWMjiIw2ZIuyKqlVE7WvY2Mxc2fjfOsKXfde+PKaudp5e5Rh89M4/bBMrcs
LTdSJRXm5zdI6jC1udTg9r3mLx1DsUg8kXYMim0WiAKEl30pVwQAZ8/3PF4Hl5Tio7YTT8crMBzw
M7Yst4ujS7W/6wxEE7242WzX8z0WM2ktcSAU6mPMHHuZsvvWjdVeYjcbRs3IXDl5EHhxeqWHrCp1
/jNRglXsc5wIttH+heO+4OgM0T8vPtcXylL0dqazzZgAvkhZfx3P5D7Ax8WTMfkWnp3Ehbu7L6Rw
AXw7ThWwnZ1DJJ4QNQ8wXo61FDvI8mL7zy5tLPnYresXcfUe+4zs/Mk8fDy9k9MpZ9lQ4YJ18zsy
t7Ol0O490B98/thgcNzBseeV5DiCVWVOWEr/Nak3b5KQsOUwlRxekyBh/jHw0WJBjsRisXuY7ZlK
BKC1lNrMZ5Y7LGRf3zD5xdsd/hP+8H3bO32tJp6zVDgt42S5zG5MBlJPZTxmp5Rn9y6Q3tcXIE8e
7Im1D0fvpx8/M801idlur/QEo5+sdlnN0J204I+ahhYjCcbii4djyTcyWTqTUfs9bo/DKEqJceDD
9h8ZGY4zz1emtquX6gEY5GcP9yJ1GsEMiA5a9mrb4P/et+2odJDa5Fxa8ZAf8OFYOzY4Qn6084S0
rX0ItnMT7Qi98pH50eArubvdF9LEEUjjxgA+g2qXvLz8NZIhFzETAay1mK2XG6zSOPDl8KuErC8h
csc/yY3u7gnGKPh9Qwx81VeAqho30r6J/m3Xn070k6FwTL5GPsHH+buHw+Tlo33kpeMD8DquY1ZH
G5l/7XsHBoJv4Hn04KtjgHWMGqflNPr2yukQwOetdiNl/OI48EEKybh8lTeyuMlDfaFYGwN/R4q/
4xxrdnQHtj64p73/rW5/XsDH/QciMbK9fZA8fah33+Gh0BXMVn6TzN+GR7v1r50+iefSjQG1dtwy
F/jXqRIAnC6XCRYyAXzZ/E/Il9ibxU1CcUdY+M5JvoMqBRteOTEQEUUx5+Dv7vaR3+zr6trTO7yV
flxJ+xNkYTRw4Sd6R6Kpxwq6gNlIymwm1E46dyoE8HGXy2WQpMQE8PESV+zQg/of6czAUXMwiweB
TPnOubWFFo7ncsr2T/pD5M1OP0TOCkZoCbJwGpw9GwByKvBV/0uNwgXunAoB3Gi0cinBl0VAUh7D
7hw+yH1ryp0XIWYgl+Bj+fQPR/rw8W+Isqiz0NrfN3ttpbIlkAJ89b3XZladeY3ZEECL2WRZSoRE
SvDxfzIh+wRSxqKl4QKZ2tbVZc5bNlQXKgEjOVT4dnbJSj2SLV9YgOBj9t9ZSTV9MQP4ssimbxo8
NgzJJ7IhgAssVjORZOVvIvg5bohne6C52CH7sXNp6r07MExlfwDrFJ8nC7N9YonXVq6uiKYDH68D
4ThRbCzyd0RZSs9IABsFE0kLPkeUjBX9iabBBcCOHr10cYlRdvrkEPzhaJy8dmIQDp0byMRgjQYy
S5m3eWzg6Xcho1jKAvwuaorDzLabBFRLvWAyAljJG0ha8OV1AF7+mWMGD4BVv2fPqS1017ttOffw
Hegbwdu7yMToGMQSIEMZ4VNXz2MCuKmp0FaF6qfZgO+n4OOzQVGwr81EAPhcx/FiWvDRDEaZQqrS
3d0kpd9MmPkbKt2NrSzKJZfg4x+zQX4sj+66WPZ9fnN9UeHFTSVY9HmEKLkL/DwDH+N3F1YzpSzB
VxViUXnUKxkHSUkAToEXTBInpgUf/xqNMvdfMg3w0e5fU+7aLJd+y1Po9mKvvBqGKNlCZmLe21ps
/+EVS8qMGLiiAjO5rrWSay1xICLoScaR5mo7i3EztX+fPl8tZv9UwEdL0NcCI+9inFCZzHoC4AUh
I/h4bzDJdLNyGg9zK9X4t26sKSRJMT/g41/kG6ypcDmp/Y8ZXruyxHHJynK3fO/qYCBa6bQKN9ji
Jbu6AxALV2Xp3JrttnxZke3rSmFaQgEUSLnTShLS1MBXx8gki2/xYqJEMk8ggBA8cenBV44bzfKb
86Y4+3HRe5eVOPMKvvq3FVS8WARhK1V85KpcckClbsBEORSNfs8gNG1rH9rGOMZccxQdHookyKqK
MYk2XfDxj6hgCJ/A7alEgE8UkzIHSA0+Jx/ijCIVAyYsFVdm+RAoefKryxeXCo5UXqs8ZOwoVbjs
xEj1AaRnYS0AoeCpAigQlewyG6DUVs9BDnBoOJbIytSbDHyJ+QSMPNesYqfnAFRMSD2cyJdKgpgS
fPXkdkcBGRqMIaX7m5M8QDHtj32g3uuq1ioueQRfXQPY0+0nO7v9mNkIqcX6Rgm6zSjwViMvF5mw
UAKx0u62GIg/moCJeGyOEUD7SCwZo2NukgGYAfiyU4jCitozcVGCbvGQAWxbZ7fvEpP8RZwgpgRf
fW+xU91qkNzMPG1iBo31kU1V7sbmIuesgS/LsngC4GMZ+lIyfqcPIRhPltBe3E/ilYwoShmBROYg
B4Db9Ug0mVxqFoQZga+Ot6CAuUEmgBQX3JGISReZTOnBl99RMeB0OpcEAgEoT49qfo+Vh/9mpkbN
6nLX+tMrPOOSGvINvsw3B2RfwDfIxG1eMKBdrO+ZJ6bfkUiMEoBVmDH4ZGy2rhvVAXTKW5uYlDKC
r9TkoSaFQ5bn/07GR9QiA/bQunLnNde1VKzfUFU4q+DjdSQWJ1SzR47etxaI2/dwSBNcOxPwJaYf
0dYCJFM5QbqSyWRG8FVHsWCViMfjwTLrp3Qu4K9u7wq8csIXnhWFTzswuK9D/fLs/wqZQd78HGt9
EWY6zRR8xfqRoAhC6a0DASzWcYHDWO+fDHyFC4jE6hDUwa7QEAEo6COvtw91gBVzswQ+2mA4Svb0
DkOR+8EMBrxoDoGPEPl/r6KmbC7Al01BxRLAh2UggHN1CzuHotFImJOlQ3rw1U+8SSSFxW7Yzz8j
49cLwYKvfu5on//YUHBWwMcJDyuy/z/IzDJ2YXf9TZbfXUfyU+SZY8/xszNqCqn+J+QEfPU9i7uR
OQAiYpfqHv4tMS6MwZkGfJkL0P/sbgMpLPQgz+7LOlEAD9vlzx7uHTkyODzu6fJRlqU/GCH7+4NY
AHpwhoOPQIJlzGEyWbu+2mFGlbO3Se7SzLDW/3BTYcG/nd9QzNnMxpyCrxllmQBQZ2et7gZeSUQl
zXRODb56CIGjNo9sUCD48AYdESBv8ILnj/b37e0NjIv8zXVNnoMDQVUpTVkuBfek7ZM0pLz9nDmx
MrWN2BrmgobiljKb6YfMvJpJg6/+5cVe2zUoGIWV11yDrzHHZQKAvDtTdxMvRNjFRkPC04A/Wr2C
T5LyWi8+IN/+QzoiQPTvxtfaBg/s7PTJ7CfX1bh8kRg5PBTaBadTtiM9CREgmvlpdr7iNN+Bybuq
wCzICS11Suzd7TMkgBqPxbgO3km4fPMBPhlzi3t59nCbdYrgn4LBkRGeqgbZgK++NVDLv2pRKTwI
v9ESARtoZKds3N7pe+yNkwPEH4nltBRbXFlaDJA0NXnSgT0JEdxfUmBC4MrjzKmlb2vKbGY5fEbe
CUSphXA146qpGlyw/0D7L4lSPDtNk/Ow8ga+5nfFMgcotZlqdKwOHrGnE5HswcdRo8VInEUmsqi5
wsJmzsd1Aw3ZumV3z/DWX+/tDL/bN6w9zYzq8NlNsghqmAbImf6+qzcU+8OKEvsZjLPpvZw31rqt
o4MMubOq1IkbSVWwabvbbNjfVGj73vpK93VEyYI+M92F8wm+6hIG9iAAa6NHXj/XZwn+MjQSYwtD
WYBPFRWLReEYVpdAGpdXwrL4X/gEVIeTZqARnr2SioQXXz8xQIbCUR3QU6/AydyklYwt57Ldh9W4
ZUV26DZ3s2PYp3hHc5Ht7yqc1nH3Uq2IAcTeaWMMmlxmw9ozaotII5XrhTYLWVPp5pieMcEbC0dN
PsFX/iZTgBXACE1KAMW1OjHwzPBwoI0TDdmBbx0vLrBW0LKmjisrL0Uu4BOqba0RCSgpd/7e/pEb
H9vf3bWb6gaJpEimW34Vn4usRi4dF5iBPvDcyUBkV5UC9D1MHLy2ocrTsqTIKYM1XoRxIBZEHN2s
OXxlhcM8bsEEefyLC22tqXQGFITKL/ij55B3TbYjKLPEZlrF3INac/CH4eHklMEfDSgVRFJaV0Aq
qkuxILNb1TU0Ay4xLrF4R7f/qz/bczIMS0FFdKqFl50WeTI1TgLs9VPkEjj1/dgFdMuyCm5TjefK
K5eV8yV2S5rFLaRjycRyt0Z5vKrYbhl3QjxLtfI9WC3V4zmAlFfwNSLAPuoKXlEix3jerOMC3x/o
HwwL4FKTgU904LPviVKSFFVaoBeAPb/A2J5dxw1GmAlZv73Dd+9Pd58M7u3105sUNYM7edVtp6IH
NGUAk2dey29NkQv8kloYbcF4Qp65YtqAFmXYTVQcLS+2wzn2NbbKuN6ujYNgXzbQ77WUODAW94+7
Rym/4GvT8TEgw9GESJqLZAK4SSe7kE3z41BAnBx8biL4Yx8kYvcYyGmbFnEVVSXIz8OW69fo7XPm
PYQCteivnf57fr6nffDNjkHiC0UV/0EG8OUd3zMogqxtLvZ66qsqSj/BXKzZEgG44Yi8i4mUGXz1
b0rpO3k871le7OBVMaVfz0fyJuW+sBwuIUqs3iMmJag1r+DznPxmhFe1ATgcqHZqV71ZGi7wtf7e
gQgnGaYFPqd1GEl0BtUUkCWt1ZXFxV5UtfwT7WekIATsioGAzeq9fSM3P3GwZ9vTB7sJqmQlsFCV
pt6+w5Q+WJW1T5ZVOklppVVVRJuzFANY2l5cYDBMCr52xe1DS8q408rdN9ar9QxTBHP0heKEVxTY
X9D+ksNsbBRZ/B8nqZGAuZ75o06+JAggnGBJhMsVMXAr0YQNE6WI8b0hXwKe/2mDP+ZUkojVwZPq
xU7SsKTyDJfLCSL4HdEka2gIAat5qOC5iZpFy7e1D9330DudvX9u6yft/hAVEclx0xFbyGDyZVjg
ubKw2Egcbp40N1fYmL8iG31gqddqNBAuO/DVW0rKvgHLpDF8I7EksZlNbpvZyCXZjJS1c0oBoVii
l88x+BpUQjh3fzSuDKTNaCTryl1QSD6t4wJfp7rAyWRcmCH4Y3+Dq8NZZCCNrUWkvrH8cuZLf1Tr
StW5beHjRy3iqmO+8BWvnBh48OF3OodeO95PTlAFDaXZWaJEVRpQ/6GhocIs5zxg6bLaRurry1pI
djEDqzxyTeLswR/v25g8aUPSumjpPwYKPpXNeOYLg7FEzKirATAT8PGZiYABXl5rRrAkk1HLFC7w
BeaTVhtWcv4+5ItRqjTMGHztchdqDReWmcjasxr4puaqqylH2MbWD1DEQUjBFeLMrMRqXSk10S54
vX3ou4/v72p7hoqJYsUUXKST57jpreVVtlFFFbkPDZQLVVdO1AdS6AGt2LgqH+Cn0vbBJUXFs3k/
s56+Kqd65wh8vOcVRPpkDhCOjxZgI9BWN1S6vWTiTl/PDg0N/XSgIyTnBuYC/DH/EicTgqvYQJas
KCbLV9ScVVxciMLNR2n/ItFkIem4QpxZFuBYtb5IYklfOI73+g0Xr64oL6owmMYSXhSdJEnKq7PS
B1aMavF5Bh//GOhADceSao1itD1JScoZ+FqHIwjghBJ2PMaCVpbJk/82dVA0RHBHZ3tPZ3AwnlPw
tb+TqBS0URldv9RF1mxYVFNXX/4louyu8SzTqr0ZVvcOMjPz33QAfq6i0qG7rnJPTpdAli2bVB9Y
aaMcYDbA1xwBUaoVyy5X8vpyBL40isdxEMBxZNNqzQxkzZxb6zWxwdQadIO0f/TYoa5EMpoC1BmC
Px4ciQhmkZTVWMiGcxqF1pV1F1VWlPwPUQpTIKsFYWglWSz3XlZaUrjW6TFMAF99X1VjIw2L0uoD
sIxcyaQ0K+Bj9geiiRgZq8AKjC5VEluUe0ZiFujBqDGOpwL+WPAcIwB/dGLp0eWlTuxaeZ4qHzVc
ANuW3e3rjdKbMKQEbxz4ZDrgj1+A4ihXsHs4UrvERjad22BYeVrtBVVVJYg8RvkZBGOgxNvpZGKi
ixxVU1nlIEq+40TwVYW0ieoDNVVj+oCGgOCk+sIJVOfk8j3zR82+h4gStYy21iLwJWy1U76HYFzs
iyakO8JJMW4cXVvPHnyNB1MmgH39uDHdA+F6iOWn7QG2yKIlgm92dfZ8r/fEiFwzMCP43AzB1x+n
QNo9PKlbbCNnntcknLa6/ry6urL/53I6djD/waNMJwBBXFdZUbzKXWRKC/6oj4Ket7K6YJw+oCGC
e98dCD5xhMU35gt8iW1kwcZcbZeC/au/Ytvj9bPl5LOoDX8MiR4Clz34oOOEovbt46w/2Y8z+j+1
utYh8Pw4CsdlUbqNatnY8Aj19SWdZv1oeWXJ5aW1yPFP5h/8jOfhiEgZ2fBQgviGIsRHZ6zNZqbm
np043YYJ59H/Vr1WR1uYvLO3A9VPsfFCSEP0SM7btbrCXVvK1gFyDT7GOxJPBpkCjgpgCHDZbjfy
rVFxTEREEiK48Nnsp/Dc/shh5LdgL4Q481OnA182MSnMCc4Ay87Fsz+/44vEJ3iccKqV5S5yWpnz
ghS+AciNj3R19L7Q20bZI284heArgycYJOIpEUg9FRWnry8lS1rdUwIfx6spMTc1lI/qAxqCR4LJ
R3Z0+mKReDLn4I+5swWbzSg8YDHwryPAhb62xiWNwqd8eSVT0g0sxuLDw3HxM1GRRCASMoEvWxnK
RN8jB3KpwQp9rLS7PnpXFgVe2TeAwsNrdESAkJErOtt7/9B7nLJHTjhF4Gd/nkzgsycmi5udpLY6
pT6AyiJ3dwRCY2FtOQJffQ+LHIDLSZwCb0yy9+rv8NkqcE67kbuP+QjOZaf6/7AWJMJnBF9ZB5Af
9k2iUZre6B4tNjgxdNthMZLN9UVmJl+LdUQAd+3lnR29j/UcC476buYj+KP5DiRJLYO0+sD9VB/4
7VHoAzkGPxs7HxMyRtl8lLJ7SgjLaX+JKY3w4Lp5LjP4yjK7/KRvaAngtfZARGaj6VbcGgvtZF2l
G6FjT6r2soYI8LTXdHb23NtNiSAaFOcc+CRL8NXjLo9AWlsqU/kHMBw37e8fOd4zEplV8PXr+SCC
pCQTwkeYq/xfuLFlx5Tgw4pQKv3K1twoAXQMxxIH/NF4WjMHBVxXlrqwrQuUo0dYTJyWCKAy3NHV
1fOJfbtPRvy9cfn0cwV8bgrgq27u2jo7WZxeH7h2Z5c/FmU5e7MJvvZ7WKKGK99ARLuBE1fGtFFV
E2Y+UsJ4LE4dYIt84+zmZzvU/WjSxO3j1KsrPVAKUe3jlymIAA3Bk2ccOdh5tLctJJuJ8xF8laab
l7lIXWp9AFr6nRizocjsgA8LACVeRMLFzQI/avrJawfQFZLKazrw5SAtQckH1gaEqO332O+WTJK0
ARm0WtlmZQtRNlocFQcaQkBx6FUn23p+cmy/jwT9Y0Un5yL4ZBz4E/WBmlpbOn3gW4cGQ49j947A
LIAPf0A4KSHrqY6+Ps7zghzHIXJCkPYkzLtM4HNsGQhYpyKAV475wr64tlp3mrh9mJoaTvCC1iWr
IQLYmTf19Q1cuW9PW3v38ZC8z9BcBH/CdXXXcxcKZGVren2gPxg9xsurdfkBHzMd4AcT0sNEiTiG
B/TqaFJaSid9EXNXP2oQ+LTgy44bbPErymbjK6kIAHbg7076wxnBV4+B1awqd5O1Fa6NjB2uSkEE
RA32aGvreeCNV48kBjrjY+biPABf/U1dvZ0saUypD2BArw1E4zGByw/4kNsUfOhdCE3Xpr1Blg+o
3CgpcWnBxwmNSlnZ35Gx7XEn+M5/fVTejCi7jB15k6cSFzmrphDWwTbVWZRCJAwzx8XKw4c6nnn3
rX7i71O2nZ0e+GTa4JNpgK/qA0uXu0h9TUp9ADb158AFBJLrmS+D/3gK8FPFLaYFX1lple9u3AbT
egJ47ogv1I0IoWzTtUQWAHnV0nLLihLHd1iwRkUabgBT5dLBId+5+/aefGX/7j6ZEMY4QrbgZ6PM
pVmNnAb4YxwjKVsGTB9YqiOCb/ujicc4ScxJDJ8MPseTUELChtwfJZOnu39GGJ240gRbHiXlKfvH
SurzmQgAVPTTI0PBrMDX2sAo/4bCi2uV7dfgS/8k0ZSg0RECZNC5g0P+c/a+c/K5P714WOrrTIw6
JmcKPpdqnXqG4Kvf9XgEctoKObTo1yniB24ajiWPCjma+aGkBG19SxbgIxfxo3G5DN5E8OU1BCXw
9KcsiGbs+aw/mVDcEeFUh29eXcupLsippGuhIfHzyECQ7O0fwWLGPxLd/kIpQq5wAMGoH6+uKrWV
VtiI3ckrJeunCX524mNq4I995sn+vQGy/1AnTN5P6Djdatpft5sEU1yanp0PmR9JSs+xsLhsKpf9
wMpLN4di8ZTgywRpNMvbCxPdFnKpCADtyQ82FH+oarQc+dTTtfChzRcir7YN4PBTLErnLf2FdMTg
YuFcN9oKrBvLytzEW2IldgclBk6aE+CPupXpsO7aNUSOnuhGbOKDOiL4tNMsfAd+/aQ0NfDlIpeE
CzGXezY1jpAJtd8gxg1xUZoAPt4XmIyUigRgcJn+x+kI4Nxqp+WlCxpKx+3aOZ10LfimoVi+fnJI
Yn4DLCq9PgkhoCHBA/mKWwqs1tUlVNksLLJQk8xAOF6aVfBTh7tRTjeUJH98+QjkJQps7NcRwW9s
RuHDCLkXp+DhwyJTQuL8JPsC1r+w8OL1YbWaqA58nM9iNlP5zyG45+VsCQBtxyWNxaeXOKzTBl/7
wLgRmJgoGtU1EsWq2n2MIKJZEEMdUbKXUbLlnPraUnt5TQGx2fhTBr76/tjRENn5Vnuq+AFws50F
Bn5RXMrevYsJk5D4bAkAJvifBTHOJURpAvhoSGeIcoadTDRNaJlq5X/pXbXC1wzBl/Vn+opU6nPq
i8mFDSXrGzwFDzGHBghhhfbCKYpOH6f9O4wIvMdO9NzS2TacPfgkP+DLbGqRnSxdXAH/wLd1xAsQ
rwklxKgxhYcuc9JGVg2i/btmLpkWfEw6lnX0pXQnyUQATx0ZCm/v8IdmDL729/iey2oiayo9ZMuy
8sJ1le7bvFYjdIO97EaXpSEC7crjRUXF9uzB5/IDvmJbi6SlxUUaassQsfxxHRFg5t0ha/VTCuDM
qm21CWRVDCFkKcBHw35CYZHbznSwlC2TCEA7v9ZpeX5zQ4laXXJG4KezKOSggniC9AxHyJtd/jeJ
pmhVCnFwdlGh+5XV60tlP/2pBB//Dg+LVBeIE58vTg4c6UinDzxsM3DXxkRVH8icsZMkwmQiAPrR
W0YpYYspMYQTwAfRmRXZ/0G97a9thkmo7IUTgcjT7/YPX9rodeQFfPU9ZFVAiU7+3iT39PXqWtfs
gy9xZGREJD4Z7CgZ9AVJ30AgwZxbO5g3EK+pqo1/KpiQTrcIXKMoiRlm/qTTH6uvSJL5iYUTbeF4
avCVTTNMAP/pTOBnQwBon6Ua/Afr3QVGrDzlA3xZNIgiOTgYhF/9Vxnu5aqKsqKNJWVmJfwgX+BL
PAmOJInfp4IdomD7E2xm79AADtGVcrt5nQjDjL6W2vbbKBGYY0kpPfijIfsyNssYR1nDXlsNHDEJ
9Nlj8URa8I30IaKcAKfAHZOBmw0BINvm3r29gbtay9x5AR8NW7wyT1U4DfuHNvOVqhpHbsEH2EHK
xn0x2qNkiILdO+BPpgE7rV2exV5JcIrdTkXAdzOBLynJgXhIv4knBQZWmBvFMlBCB/GC8TQyXw72
Qiyh2QSRjbDxd3NBAGhf3t0zvKXQamqswKYPOQYfAQ1/7fRLzMeert1YW1O6rNBrnJDkkTX4FOxQ
MEkCvgTx+aN0do+CfUADNjqCLYOZBiQLwGXJRpT8hLVqF2R/e/q4fQgILpmA3VAQo3cWS+PbTwW+
vCcQFaURiT9MWNXWXBEAZuXNLx4fePGGFVZurGzBzMEHdQ8pwRQvailWN/tRYOeL1dX27MGnBk4Y
YFPOjZnNwBbZNd7UgT2SA7AxlssZ0OvYawtl+QZ1mRhL6NnE7UsZlnQzgY+YgThvxB8+lU40TZcA
CPMiPfB2t/82RRTkBnyAdUxZfMqk/N3SuKi82ubiR0lvHPh0ZkfCogx2wA82HqYy2ycy8aVV0Hbl
CGzVBbtW0083C1yBgaUeIb07IYlyActYlhk7mYI5JgMflzWZqOInyfEKL2X7EFMhALS79/QOb7aZ
DCvqCm0zBh8tnkySY75wJwtUSDX7QW13V9XaZfCxEKOAHaedym1/mPT2y2Af0snsXSwOIRdgl2mA
xuxeY+I5r1HgWCEsROkqMfvJ0Tz+qeXqzQR8HLEYjWD9SPa4ayqATpUA4La9blv70HaH2WDzFphn
BD5am7KpxA9J+u3a7mpuqivsaBuhcpvO7H6fpAN7B3O4BHIEtpO5TVU2vo6y1moKOBFYjl6SAR4T
Z5ainSvw4fCJcQaw0etSudYztckcQekaFmke+mhLpTwFpgs+2Pej+xAIIPv6O1LMfpRY+73G1lbB
9k92g1kCDntypU5uL7EaeF7Ox5c1cHmHrbFny1F+fq7Ah5vZYraQkCjnBvx6qkAayPQaLrT+nR7/
Z1vL3QS+jamCL1fICspL3U+o4KdoXSSLHUqzBBtu72YN0OgrLQbehHw6pEtBjsAkQGh9TC1WmevK
HDkEX/b2mcwA/97pgD8TAkC7852+kUaO4y5fVuKaMviY/Ur84XjlTwUzXdHGKbDyWuZAWc/AXm0W
eAdSqXle0SZEBnZCA26uq3HlC3y4jC0U/LDEYwLdOV0QpysC1IaQqBdXlTrXLy526ERBevDRIvEE
efZQ70E2KyUys+bVzey1dpNQispekI8hKrRRik2UlMSJ8eOfn4yd2QA/SgQsq28mM9gcyzDDgceF
L9vdE3iV47nmpkI7EbMAH4fahuR7/v40wLfpnCvrzAZ+kddqJG6UqzcbCer5IKauX03XYilv0gIA
H7zLbDQBfDivLiMz3BltpgSA1kf7Bbu6/K9Qtr6oQUsEacDHp739I2Hm+s3UUJqrRTe7l5faTAJm
t5N2VO9CmpSkM0f7T1GuXr7Bt1Bbn2r8iOu7gI09OdUEgNZO+/k7u/x/pCy2HiuHYlrwCekOyMof
AkKGdCusTTp7exWd2dZCq4m4LAY58thiNIw/ryRNAH9gAYIPto+ZT8E/xsBvzwVwuSIAwpZBz9rd
HXiBgt/c5HWMB0bzXO3Dsu0Px88VqmMFry6zwe2lYMusnAJeYDYoRR3zWJNnnsl8sP3zM1hNp5QA
CLuxc/b0DD8litLaJSVOpQKp9nnpf/WeAkz332LKe+TZbaSz20AflB8tm5p61fG9Bz5MPYAfIQLS
7xAS15tLwGZqBWRS1H6xrMh2RVORczR0S9INXr5q7y4U8OHkMSumHrjlDZOtUE6n8SQ/DTe6ZV9/
8L/e6fYTXzj2PvjTcO/Cw0fBRxj9lnyAn08OoG1wUf5odYXLriaavA9+5lU9s9EIZQ+rlkivezif
4PAk/w0PsHFHp//tg/3D42B9H/zx4GP9wWo2A3yUzt+Yb/BniwDQkDixbn//yP1PHOiW+kYimjF6
H3yEcZmpeSsaLVJE4u9nltE7swHMbBGA7P0lyhZp57/RPnT48MAwESekUr/3wEcAp9ViRiTPYWbi
3U6ySwiddwSgNoR+raDc4D+fOdgTa/eFRkO63kvgw7yzGk0kabTE6Kz/T6JkR70422DMhhKYqcHz
961Kh/mialcBKWQBJgsZfIQZQMPnBQOydhDr8E9ECV07Je1UE4DakL3ylWqnZW0VJQQ3259nIYGv
AG+Qc/Uo8HDqYJ/E5071wM8VAlDXAuDp+mKZzbS6xl1AvDazHH41n8FHGDgUPIKiDyKHiCaEaz9F
Zr4EvuAIQNuQy46slksaCm1cTOLk0HFpnoAPSkZNHpRlCUs8/vgM7d8kU4jWfa8TgNqQBIntbP/W
bhLKYCeLBKVOpQmFrU81+AAdWTwoxYZqXKwgE/ZFRsDrkbk6wHOdANSGuAAsgSIY9YoCI+9GwAfK
4ikRuqcGfBReRj0fQY5H4NUijPDbIz4PSZnxuT6w84UAtA2RvNhjF9VCLjbyXLO8ZQq4A6UIEAT8
C2KOweckZa91EB7PKTGFqPzBCi+jmhc0elQ/i86nwZyPBKBv2M9oE+uI528WOK4Ykb4CN5ZBpKZm
KcU0lHK4bAt1eb1dzRDGe16uV6hsc4tENFEBGsSFCBw1j/B11jvm8+AtBAJI1bC/z2KiBJxiM2mU
vkN2j5d1C+Mkap2/EJu58MANsA4Zjo0r32Wgw1YfWmgD9X8CDACB5yXKKmEKkwAAAABJRU5ErkJg
gg==
"""
| gpl-3.0 | 7,281,487,483,231,738,000 | 52.585859 | 170 | 0.837135 | false |
our-city-app/oca-backend | src/solutions/common/restapi/reservation.py | 1 | 12531 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from datetime import datetime
from types import NoneType
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS
from rogerthat.utils import get_epoch_from_datetime
from mcfw.consts import MISSING
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from solutions.common.dal.reservations import get_restaurant_settings, get_restaurant_reservation
from solutions.common.to.reservation import RestaurantShiftTO, RestaurantSettingsTO, RestaurantShiftDetailsTO, \
TimestampTO, RestaurantReservationTO, RestaurantReservationStatisticsTO, RestaurantBrokenReservationTO, TableTO, \
DeleteTableStatusTO, DeleteTableReservationTO
@rest("/common/restaurant/settings/load", "get", read_only_access=True)
@returns(RestaurantSettingsTO)
@arguments()
def load_shifts():
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
settings = get_restaurant_settings(service_user, service_identity)
return RestaurantSettingsTO.fromRestaurantSettingsObject(settings)
@rest("/common/restaurant/settings/shifts/save", "post")
@returns(ReturnStatusTO)
@arguments(shifts=[RestaurantShiftTO])
def save_shifts(shifts):
from solutions.common.bizz.reservation import save_shifts as save_shifts_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
save_shifts_bizz(service_user, service_identity, shifts)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/reservations", "get", read_only_access=True)
@returns([RestaurantShiftDetailsTO])
@arguments(year=int, month=int, day=int, hour=int, minute=int)
def get_reservations(year, month, day, hour, minute):
from solutions.common.bizz.reservation import get_shift_by_datetime, get_next_shift
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
result = list()
shift, start_time = get_shift_by_datetime(service_user, service_identity, datetime(year, month, day, hour, minute))
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
result.append(details)
shift, start_time = get_next_shift(service_user, service_identity, shift, start_time)
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
result.append(details)
return result
@rest("/common/restaurant/reservations/broken", "get", read_only_access=True)
@returns([RestaurantBrokenReservationTO])
@arguments()
def get_broken_reservations():
from solutions.common.dal.reservations import get_broken_reservations as dal_get_broken_reservations
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
settings = get_restaurant_settings(service_user, service_identity)
result = []
for reservation in dal_get_broken_reservations(service_user, service_identity):
alternative_shifts = [shift.name for shift in settings.get_shifts().values() if reservation.date.isoweekday() in shift.days]
result.append(RestaurantBrokenReservationTO.fromReservation(reservation, alternative_shifts))
return result
@rest("/common/restaurant/reservations/move_shift", "post")
@returns(NoneType)
@arguments(reservation_key=unicode, shift_name=unicode)
def move_reservation_to_shift(reservation_key, shift_name):
from solutions.common.bizz.reservation import move_reservation
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
move_reservation(service_user, service_identity, reservation_key, shift_name)
@rest("/common/restaurant/reservations/notified", "post")
@returns(NoneType)
@arguments(reservation_key=unicode)
def reservation_cancelled_notified(reservation_key):
from solutions.common.bizz.reservation import cancel_reservation
service_user = users.get_current_user()
cancel_reservation(service_user, reservation_key, True)
@rest("/common/restaurant/reservations/send_cancel_via_app", "post")
@returns(NoneType)
@arguments(reservation_keys=[unicode])
def reservation_send_cancel_via_app(reservation_keys):
from solutions.common.bizz.reservation import cancel_reservations
service_user = users.get_current_user()
cancel_reservations(service_user, reservation_keys)
@rest("/common/restaurant/reservations", "post")
@returns(unicode)
@arguments(year=int, month=int, day=int, hour=int, minute=int, name=unicode, people=int, comment=unicode, phone=unicode, force=bool)
def submit_reservation(year, month, day, hour, minute, name, people, comment, phone, force):
from solutions.common.bizz.reservation import reserve_table
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
return reserve_table(service_user, service_identity, None, get_epoch_from_datetime(datetime(year, month, day, hour, minute)), people, name, phone, comment, force)
@rest("/common/restaurant/reservation-stats", "get", read_only_access=True)
@returns(RestaurantReservationStatisticsTO)
@arguments(year=int, month=int, day=int)
def get_statistics(year, month, day):
from solutions.common.bizz.reservation import get_statistics as get_statistics_bizz
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
date = datetime(year, month, day)
return get_statistics_bizz(service_user, service_identity, date)
@rest("/common/restaurant/reservation/arrived", "post")
@returns(RestaurantReservationTO)
@arguments(reservation_key=unicode)
def toggle_reservation_arrived(reservation_key):
from solutions.common.bizz.reservation import toggle_reservation_arrived as toggle_reservation_arrived_bizz
reservation = toggle_reservation_arrived_bizz(users.get_current_user(), reservation_key)
return RestaurantReservationTO.fromReservation(reservation)
@rest("/common/restaurant/reservation/cancelled", "post")
@returns(RestaurantReservationTO)
@arguments(reservation_key=unicode)
def toggle_reservation_cancelled(reservation_key):
from solutions.common.bizz.reservation import toggle_reservation_cancelled as toggle_reservation_cancelled_bizz
reservation = toggle_reservation_cancelled_bizz(users.get_current_user(), reservation_key)
return RestaurantReservationTO.fromReservation(reservation)
@rest("/common/restaurant/reservation/edit", "post")
@returns(unicode)
@arguments(reservation_key=unicode, people=int, comment=unicode, force=bool, new_date=TimestampTO)
def edit_reservation(reservation_key, people, comment, force=True, new_date=None):
from solutions.common.bizz.reservation import edit_reservation as edit_reservation_bizz
new_epoch = 0
if new_date:
new_epoch = get_epoch_from_datetime(datetime(new_date.year, new_date.month, new_date.day, new_date.hour, new_date.minute))
return edit_reservation_bizz(users.get_current_user(), reservation_key, people, comment, force, True if new_date else False, new_epoch)
@rest("/common/restaurant/reservation/edit_tables", "post")
@returns(ReturnStatusTO)
@arguments(reservation_key=unicode, tables=[(int, long)])
def edit_reservation_tables(reservation_key, tables):
from solutions.common.bizz.reservation import edit_reservation_tables as edit_reservation_tables_bizz
try:
edit_reservation_tables_bizz(users.get_current_user(), reservation_key, tables)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/reservation/reply", "post")
@returns(ReturnStatusTO)
@arguments(email=unicode, app_id=unicode, message=unicode, reservation_key=unicode)
def reply_reservation(email, app_id, message, reservation_key=None):
from solutions.common.bizz.reservation import reply_reservation as reply_reservation_bizz
try:
if reservation_key is MISSING:
reservation_key = None
reply_reservation_bizz(users.get_current_user(), email, app_id, message, reservation_key)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/add", "post")
@returns(ReturnStatusTO)
@arguments(table=TableTO)
def add_table(table):
from solutions.common.bizz.reservation import add_table as add_table_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
add_table_bizz(service_user, service_identity, table)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/update", "post")
@returns(ReturnStatusTO)
@arguments(table=TableTO)
def update_table(table):
from solutions.common.bizz.reservation import update_table as update_table_bizz
try:
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
update_table_bizz(service_user, service_identity, table)
return RETURNSTATUS_TO_SUCCESS
except BusinessException, e:
return ReturnStatusTO.create(False, e.message)
@rest("/common/restaurant/settings/tables/delete", "post")
@returns(DeleteTableStatusTO)
@arguments(table_id=(int, long), force=bool)
def delete_table(table_id, force):
from solutions.common.bizz.reservation import get_shift_by_datetime, delete_table as delete_table_bizz
service_user = users.get_current_user()
session_ = users.get_current_session()
service_identity = session_.service_identity
dtsTO = DeleteTableStatusTO()
status, reservations = delete_table_bizz(service_user, service_identity, table_id, force)
dtsTO.success = status
dtsTO.reservations = list()
if not status:
for r in reservations:
dtrTO = DeleteTableReservationTO()
dtrTO.reservation = RestaurantReservationTO.fromReservation(r)
shift, start_time = get_shift_by_datetime(service_user, service_identity, r.date)
if shift:
details = RestaurantShiftDetailsTO()
details.shift = RestaurantShiftTO.fromShift(shift)
details.start_time = TimestampTO.fromDatetime(start_time)
details.reservations = list()
for reservation in get_restaurant_reservation(service_user, service_identity, start_time):
details.reservations.append(RestaurantReservationTO.fromReservation(reservation))
dtrTO.shift = details
else:
dtrTO.shift = None
dtsTO.reservations.append(dtrTO)
return dtsTO
| apache-2.0 | -8,618,700,201,135,557,000 | 46.828244 | 166 | 0.743676 | false |
os-cloud-storage/openstack-workload-disaster-recovery | dragon/openstack/common/processutils.py | 1 | 9182 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from dragon.openstack.common.gettextutils import _
from dragon.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""
Helper method to shell out and execute a command through subprocess with
optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
    :type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these code.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError, exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
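# Illustrative sketch (not part of the original module): one way these helpers
# might be called. The 'cat' command, the file path and the retry settings are
# assumptions chosen only for this example.
def _example_execute_usage():
    # execute() raises ProcessExecutionError unless the exit code is allowed;
    # here exit codes 0 and 1 are accepted and the call is retried twice.
    out, err = execute('cat', '/etc/hostname', check_exit_code=[0, 1],
                       attempts=2, delay_on_retry=True)
    # trycmd() swallows the failure instead; a non-empty err marks it.
    out, err = trycmd('cat', '/nonexistent', discard_warnings=True)
    return out, err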
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
| apache-2.0 | -8,191,133,469,897,886,000 | 36.174089 | 79 | 0.574167 | false |
jinzekid/codehub | python/code_snippet/DataAnalysis/1.py | 1 | 1107 | # -*- encoding:utf-8 -*-
import jieba.analyse
from os import path
from scipy.misc import imread
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
list_test = [1,2,3,4,5]
for i in list_test:
print(i)
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = ['FangSong']
#mpl.rcParams['axes.unicode_minus'] = False
content = open("testing.txt","rb").read()
# tags extraction based on TF-IDF algorithm
tags = jieba.analyse.extract_tags(content, topK=100, withWeight=False)
text =" ".join(tags)
#text = unicode(text)
# read the mask
d = path.dirname(__file__)
trump_coloring = imread(path.join(d, "Trump.jpg"))
wc = WordCloud(font_path='simsun.ttc',
background_color="white", max_words=300, mask=trump_coloring,
max_font_size=40, random_state=42)
# generate word cloud
wc.generate(text)
# generate color from image
image_colors = ImageColorGenerator(trump_coloring)
plt.imshow(wc)
plt.axis("off")
plt.show()
| gpl-3.0 | 6,991,800,573,141,162,000 | 24.744186 | 74 | 0.65673 | false |
J77D/fallout-terminal | fallout_hack.py | 1 | 7441 | import curses
import random
import os
from fallout_functions import slowWrite
from fallout_functions import upperInput
################## text strings ######################
HEADER_TEXT = 'ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL'
################## global "constants" ################
# number of characters for hex digits and spaces
CONST_CHARS = 16
# position of the attempt squares
SQUARE_X = 19
SQUARE_Y = 3
LOGIN_ATTEMPTS = 4
HEADER_LINES = 5
# amount of time to pause after correct password input
LOGIN_PAUSE = 3000
# starting number for hex generation
START_HEX = 0xf650
# list of possible symbols for password hiding
SYMBOLS = '!@#$%^*()_-+={}[]|\\:;\'",<>./?'
################## functions #########################
def generateHex(n):
"""
generates n numbers starting at START_HEX and increasing by 12 each time
"""
num = START_HEX
list = []
for i in xrange(n):
list.append(num)
num += 12
return list
def getSymbols(n):
"""
return n random symbols
"""
count = len(SYMBOLS)
result = ""
for i in xrange(n):
result += SYMBOLS[random.randint(0, count - 1)]
return result
def getPasswords():
"""
Returns an array of strings to be used as the password and the decoys
"""
groups = []
# script file / password file location
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
# read from passwords.txt
with open(os.path.join(__location__, "passwords.txt")) as pwfile:
for line in pwfile:
if not line.strip():
groups.append([])
elif len(groups) > 0:
groups[len(groups) - 1].append(line[:-1])
passwords = groups[random.randint(0, len(groups) - 1)]
random.shuffle(passwords)
return passwords
def getFiller(length, passwords):
"""
Return a string of symbols with potential passwords mixed in
length - the length of the string to create
passwords - an array of passwords to hide in the symbols
"""
filler = getSymbols(length)
# add the passwords to the symbols
pwdLen = len(passwords[0])
pwdCount = len(passwords)
i = 0
for pwd in passwords:
# skip a distance based on total size to cover then place a password
maxSkip = length / pwdCount - pwdLen
i += random.randint(maxSkip - 2, maxSkip)
filler = filler[:i] + pwd + filler[i + pwdLen:]
i += pwdLen
return filler
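# Illustrative sketch (added for clarity; never called by the game): how the
# filler helpers fit together. The password list below is an assumption; the
# real game draws its words from passwords.txt via getPasswords().
def _example_filler_usage():
    decoys = ['TRIES', 'TANKS', 'THANK', 'TRAIL']  # assumed 5-letter words
    filler = getFiller(120, decoys)
    # every password ends up embedded somewhere in the 120-character soup
    return all(pwd in filler for pwd in decoys)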
def initScreen(scr):
"""
Fill the screen to prepare for password entry
scr - curses window returned from curses.initscr()
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
fillerHeight = height - HEADER_LINES
hexes = generateHex(fillerHeight * 2)
hexCol1 = hexes[:fillerHeight]
hexCol2 = hexes[fillerHeight:]
# generate the symbols and passwords
fillerLength = width / 2 * fillerHeight
passwords = getPasswords()
filler = getFiller(fillerLength, passwords)
fillerCol1 = filler[:len(filler) / 2]
fillerCol2 = filler[len(filler) / 2:]
# each column of symbols and passwords should be 1/4 of the screen
fillerWidth = width / 4
# print the header stuff
slowWrite(scr, HEADER_TEXT)
slowWrite(scr, '\nENTER PASSWORD NOW\n\n')
slowWrite(scr, str(LOGIN_ATTEMPTS) + ' ATTEMPT(S) LEFT: ')
for i in xrange(LOGIN_ATTEMPTS):
scr.addch(curses.ACS_BLOCK)
slowWrite(scr, ' ')
slowWrite(scr, '\n\n')
# print the hex and filler
for i in xrange(fillerHeight):
slowWrite(scr, "0x%X %s" % (hexCol1[i], fillerCol1[i * fillerWidth: (i + 1) * fillerWidth]), 1)
if i < fillerHeight - 1:
scr.addstr('\n')
for i in xrange(fillerHeight):
scr.move(HEADER_LINES + i, CONST_CHARS / 2 + fillerWidth)
slowWrite(scr, '0x%X %s' % (hexCol2[i], fillerCol2[i * fillerWidth: (i + 1) * fillerWidth]), 1)
scr.refresh()
return passwords
def moveInput(scr, inputPad):
"""
moves the input pad to display all text then a blank line then the cursor
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
inputPad.addstr('\n>')
# cursor position relative to inputPad
cursorPos = inputPad.getyx()
inputPad.refresh(0, 0,
height - cursorPos[0] - 1,
width / 2 + CONST_CHARS,
height - 1,
width - 1)
def userInput(scr, passwords):
"""
let the user attempt to crack the password
scr - curses window returned from curses.initscr()
passwords - array of passwords hidden in the symbols
"""
size = scr.getmaxyx()
height = size[0]
width = size[1]
# set up a pad for user input
inputPad = curses.newpad(height, width / 2 + CONST_CHARS)
attempts = LOGIN_ATTEMPTS
# randomly pick a password from the list
pwd = passwords[random.randint(0, len(passwords) - 1)]
curses.noecho()
while attempts > 0:
# move the curser to the correct spot for typing
scr.move(height - 1, width / 2 + CONST_CHARS + 1)
# scroll user input up as the user tries passwords
moveInput(scr, inputPad)
guess = upperInput(scr, False, False)
cursorPos = inputPad.getyx()
# write under the last line of text
inputPad.move(cursorPos[0] - 1, cursorPos[1] - 1)
inputPad.addstr('>' + guess.upper() + '\n')
# user got password right
if guess.upper() == pwd.upper():
inputPad.addstr('>Exact match!\n')
inputPad.addstr('>Please wait\n')
inputPad.addstr('>while system\n')
inputPad.addstr('>is accessed.\n')
moveInput(scr, inputPad)
curses.napms(LOGIN_PAUSE)
return pwd
# wrong password
else:
pwdLen = len(pwd)
matched = 0
try:
for i in xrange(pwdLen):
if pwd[i].upper() == guess[i].upper():
matched += 1
except IndexError:
pass # user did not enter enough letters
inputPad.addstr('>Entry denied\n')
inputPad.addstr('>' + str(matched) + '/' + str(pwdLen) +
' correct.\n')
attempts -= 1
# show remaining attempts
scr.move(SQUARE_Y, 0)
scr.addstr(str(attempts))
scr.move(SQUARE_Y, SQUARE_X)
for i in xrange(LOGIN_ATTEMPTS):
if i < attempts:
scr.addch(curses.ACS_BLOCK)
else:
scr.addstr(' ')
scr.addstr(' ')
# Out of attempts
return None
def runLogin(scr):
"""
Start the login portion of the terminal
Returns the password if the user correctly guesses it
"""
curses.use_default_colors()
size = scr.getmaxyx()
width = size[1]
height = size[0]
random.seed()
# set screen to initial position
scr.erase()
scr.move(0, 0)
passwords = initScreen(scr)
return userInput(scr, passwords)
def beginLogin():
"""
Initialize curses and start the login process
Returns the password if the user correctly guesses it
"""
return curses.wrapper(runLogin)
| mit | -8,222,715,190,287,671,000 | 26.356618 | 103 | 0.57452 | false |
rozim/KaggleFindingElo | generate-game2json.py | 1 | 1513 | #!/usr/bin/python
import chess.pgn
import sys
import cjson
import time
def StudyGame(game):
headers = game.headers
node = game
ply = 0
positions = []
while True:
board = node.board()
node = node.variations[0]
p = {'ply': ply,
'num_legal_moves': len(board.legal_moves),
'san': node.san(),
'move': str(node.move),
'fen': board.fen()}
if board.is_check():
p['in_check'] = True
positions.append(p)
ply += 1
if not node.variations:
break
last_board = node.board()
g = {
'event': headers['Event'],
'game_ply': ply,
'result': headers['Result'],
'positions': positions,
'is_mate': last_board.is_checkmate(),
'is_stalemate': last_board.is_stalemate()
}
if 'WhiteElo' in headers:
g['white_elo'] = int(headers['WhiteElo'])
g['black_elo'] = int(headers['BlackElo'])
return g
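# Illustrative sketch (added; never executed by the script): the rough shape of
# the dict StudyGame() builds for one game. Every field value below is a
# made-up assumption, not real Kaggle data.
def _example_game_record():
    return {
        'event': '12345',      # Kaggle event id, reused as the output file name
        'game_ply': 57,
        'result': '1-0',
        'white_elo': 2100,     # only present when the PGN carries Elo headers
        'black_elo': 1985,
        'is_mate': False,
        'is_stalemate': False,
        'positions': [{'ply': 0, 'num_legal_moves': 20, 'san': 'e4',
                       'move': 'e2e4',
                       'fen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'}],
    }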
t0 = time.time()
for fn in sys.argv[1:]:
f = file(fn)
n = 0
mod = 1
while True:
game = chess.pgn.read_game(f)
if game is None:
break
g = StudyGame(game)
with file('generated/game2json/%05d.json' % int(g['event']), 'w') as cur_f:
cur_f.write(cjson.encode(g))
n += 1
if n % mod == 0:
print "%6d %.1f" % (n, time.time() - t0)
sys.stdout.flush()
mod *= 2
| mit | 2,885,319,990,388,454,400 | 23.803279 | 83 | 0.484468 | false |
bartdag/recodoc2 | recodoc2/apps/doc/management/commands/parsedoc.py | 1 | 1160 | from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from doc.actions import parse_doc
from optparse import make_option
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--pname', action='store', dest='pname',
default='-1', help='Project unix name'),
make_option('--dname', action='store', dest='dname',
default='-1', help='Document name'),
make_option('--release', action='store', dest='release',
default='-1', help='Project Release'),
make_option('--skip_refs', action='store_true', dest='skip_refs',
default=False, help='Skip code reference identification'),
)
help = "Parse document model"
@recocommand
def handle_noargs(self, **options):
pname = smart_decode(options.get('pname'))
dname = smart_decode(options.get('dname'))
release = smart_decode(options.get('release'))
skip = options.get('skip_refs')
parse_doc(pname, dname, release, not skip)
| bsd-3-clause | 3,877,381,356,536,306,000 | 40.428571 | 73 | 0.650862 | false |
FrancoisRheaultUS/dipy | doc/examples/denoise_nlmeans.py | 5 | 2864 | """
==============================================
Denoise images using Non-Local Means (NLMEANS)
==============================================
Using the non-local means filter [Coupe08]_ and [Coupe11]_ you can denoise
3D or 4D images and boost the SNR of your datasets. You can also decide between
modeling the noise as Gaussian or Rician (default).
"""
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from time import time
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.data import get_fnames
from dipy.io.image import load_nifti
dwi_fname, dwi_bval_fname, dwi_bvec_fname = get_fnames('sherbrooke_3shell')
data, affine = load_nifti(dwi_fname)
mask = data[..., 0] > 80
# We select only one volume for the example to run quickly.
data = data[..., 1]
print("vol size", data.shape)
# let's create noisy data with Gaussian noise
"""
In order to call ``nlmeans`` you first need to estimate the standard
deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired
on a 1.5T Siemens scanner with a 4-channel array head coil.
"""
sigma = estimate_sigma(data, N=4)
t = time()
"""
Calling the main denoising function ``nlmeans``
"""
t = time()
den = nlmeans(data, sigma=sigma, mask=mask, patch_radius=1,
block_radius=1, rician=True)
print("total time", time() - t)
"""
Let us plot the axial slice of the denoised output
"""
axial_middle = data.shape[2] // 2
before = data[:, :, axial_middle].T
after = den[:, :, axial_middle].T
difference = np.abs(after.astype(np.float64) - before.astype(np.float64))
difference[~mask[:, :, axial_middle].T] = 0
fig, ax = plt.subplots(1, 3)
ax[0].imshow(before, cmap='gray', origin='lower')
ax[0].set_title('before')
ax[1].imshow(after, cmap='gray', origin='lower')
ax[1].set_title('after')
ax[2].imshow(difference, cmap='gray', origin='lower')
ax[2].set_title('difference')
plt.savefig('denoised.png', bbox_inches='tight')
"""
.. figure:: denoised.png
:align: center
**Showing axial slice before (left) and after (right) NLMEANS denoising**
"""
nib.save(nib.Nifti1Image(den, affine), 'denoised.nii.gz')
"""
An improved version of non-local means denoising is adaptive soft coefficient
matching, please refer to :ref:`example_denoise_ascm` for more details.
References
----------
.. [Coupe08] P. Coupe, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot,
"An Optimized Blockwise Non Local Means Denoising Filter for 3D Magnetic
Resonance Images", IEEE Transactions on Medical Imaging, 27(4):425-441, 2008
.. [Coupe11] Pierrick Coupe, Jose Manjon, Montserrat Robles, Louis Collins.
"Adaptive Multiresolution Non-Local Means Filter for 3D MR Image Denoising"
IET Image Processing, Institution of Engineering and Technology, 2011
.. include:: ../links_names.inc
"""
| bsd-3-clause | 7,466,975,111,948,228,000 | 26.538462 | 79 | 0.688198 | false |
contractvm/libcontractvm | libcontractvm/WalletExplorer.py | 1 | 3035 | # Copyright (c) 2015 Davide Gessa
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from pycoin.networks import *
from pycoin.key import Key
from pycoin.key.BIP32Node import BIP32Node
from pycoin import encoding
from pycoin.ecdsa import is_public_pair_valid, generator_secp256k1, public_pair_for_x, secp256k1
from pycoin.serialize import b2h, h2b
from pycoin.tx import *
from pycoin.tx.tx_utils import sign_tx, create_tx
from pycoin.tx.Spendable import Spendable
from pycoin.tx.TxOut import TxOut
from pycoin.tx.script import tools
from pycoin.encoding import bitcoin_address_to_hash160_sec, is_sec_compressed, public_pair_to_sec, secret_exponent_to_wif, public_pair_to_bitcoin_address, wif_to_tuple_of_secret_exponent_compressed, sec_to_public_pair, public_pair_to_hash160_sec, wif_to_secret_exponent
from pycoin.tx.pay_to import address_for_pay_to_script, build_hash160_lookup
import logging
import json
import requests
import binascii
import random
import time
from libcontractvm import Wallet
from . import Log
logger = logging.getLogger('libcontractvm')
class WalletExplorer (Wallet.Wallet):
def __init__ (self, chain = 'XTN', address = None, wif = None, wallet_file = None):
self.lockedspendables = []
super (WalletExplorer, self).__init__ (chain, address, wif, wallet_file)
def _chaincodeToChainSoName (self, code):
if self.chain == 'XTN':
code = 'BTCTEST'
elif self.chain == 'XDT':
code = 'DOGETEST'
elif self.chain == 'XLT':
code = 'LTCTEST'
else:
code = self.chain
return code
def _spendables (self, value):
code = self._chaincodeToChainSoName (self.chain)
u = 'https://chain.so/api/v2/get_tx_unspent/'+code+'/'+self.address
#print (u)
while True:
try:
d = requests.get (u, headers={'content-type': 'application/json'}).json()
except:
time.sleep (5)
continue
sps = []
tot = 0
random.shuffle (d['data']['txs'])
for s in d['data']['txs']:
#if int (s['confirmations']) > 0:
txid = s['txid'] #''
#for x in range (len (s['txid']), -2, -2):
# txid += s['txid'][x:x+2]
if (txid+':'+str (s['output_no'])) in self.lockedspendables:
#print ('Locked spendable')
continue
tot += int (float (s['value']) * 100000000)
sps.append (Spendable.from_dict ({'coin_value': int (float (s['value']) * 100000000),
'script_hex': s['script_hex'], 'tx_hash_hex': txid, 'tx_out_index': int (s['output_no'])}))
self.lockedspendables.append (txid+':'+str (s['output_no']))
if tot >= value:
#print (sps)
return sps
return sps
def getBalance (self):
code = self._chaincodeToChainSoName (self.chain)
u = 'https://chain.so/api/v2/get_address_balance/'+code+'/'+self.address
while True:
try:
d = requests.get (u, headers={'content-type': 'application/json'}).json()
except:
time.sleep (5)
continue
return float (d['data']['confirmed_balance']) + float (d['data']['unconfirmed_balance'])
| mit | -7,089,695,856,140,569,000 | 29.049505 | 269 | 0.679407 | false |
vividvilla/olaf | olaf/__init__.py | 1 | 3809 | # -*- coding: utf-8 -*-
"""
Olaf
~~~~~~~
Flask main app
:copyright: (c) 2015 by Vivek R.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import shutil
import click
default_theme = 'basic'
module_path = os.path.dirname(os.path.abspath(__file__))
contents_dir = '_contents'
posts_dir = 'posts'
pages_dir = 'pages'
content_extension = '.md'
def get_current_dir():
return os.getcwd()
def is_valid_path(path):
"""
check if path exists
"""
if not os.path.exists(path):
click.secho(
'path "{}" does not exist'.format(path), fg='red')
raise OSError('path "{}" does not exist'.format(path))
return True
def is_valid_site():
"""
check if the current path is a valid site directory
"""
config_path = os.path.join(get_current_dir(), 'config.py')
# check if inside site directory
if not os.path.exists(config_path):
click.secho(
'Cannot find config file, please make sure'
' you are inside the site directory', fg='red')
raise OSError('Cannot find config file, please make sure'
' you are inside the site directory')
return True
def get_themes_list(path):
"""
Get list of themes from a given themes path
"""
if not os.path.exists(path):
child_dir = []
else:
child_dir = os.walk(path).next()[1]
valid_themes = []
for dir in child_dir:
if (os.listdir(os.path.join(path, dir))
and not dir.startswith('.')):
valid_themes.append(
dict(name=dir, path=os.path.join(path, dir)))
return valid_themes
def get_theme_by_name(theme):
# get list of inbuilt themes
inbuilt_themes = get_themes_list(os.path.join(module_path, 'themes'))
# get list of custom themes
custom_themes = get_themes_list(os.path.join(get_current_dir(), 'themes'))
# check for theme in inbuilt themes directory
theme_exists_in_inbuilt = [
item['name'] for item in inbuilt_themes if item['name'] == theme]
# check for theme in custom themes directory
theme_exists_in_custom = [
item['name'] for item in custom_themes if item['name'] == theme]
theme_path = None
if theme_exists_in_inbuilt:
# If theme in bundled themes list then get from default themes directory
theme_path = os.path.join(module_path, 'themes', theme)
elif theme_exists_in_custom:
# If theme not found in bundled themes then get from sites directory
theme_path = os.path.join(get_current_dir(), 'themes', theme)
return theme_path
def get_default_theme_name(theme):
"""
get theme from config or set it default
"""
    # return the theme name if it's set via the command-line argument
if theme:
return theme
# load config file
config_path = os.path.join(get_current_dir(), 'config.py')
sys.path.append(os.path.dirname(os.path.expanduser(config_path)))
import config
# If theme specified as option then ignore other rules
# else get from config file, if not found in config file set default theme
if config.SITE.get('theme'):
return config.SITE['theme']
else:
return default_theme
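# Illustrative sketch (not part of the original package): resolving a theme the
# way the command line might. Treating a missing --theme option as None is an
# assumption made only for this example.
def _example_theme_lookup(cli_theme=None):
    name = get_default_theme_name(cli_theme)  # falls back to config or default
    path = get_theme_by_name(name)            # None if the theme is unknown
    return name, path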
def create_project_site(project_name):
try:
# create project directory
os.mkdir(os.path.join(get_current_dir(), project_name))
except OSError:
raise
try:
# copy config file
shutil.copyfile(
os.path.join(module_path, 'config-sample.py'),
os.path.join(get_current_dir(), project_name, 'config.py'))
except IOError:
raise
try:
# create init file
open(
os.path.join(get_current_dir(), project_name, '__init__.py'), 'a'
).close()
# disqus file
open(
os.path.join(get_current_dir(), project_name, 'disqus.html'), 'a'
).close()
# create contents directory
os.mkdir(os.path.join(get_current_dir(), project_name, contents_dir))
os.mkdir(
os.path.join(get_current_dir(), project_name, contents_dir, posts_dir))
os.mkdir(
os.path.join(get_current_dir(), project_name, contents_dir, pages_dir))
except OSError:
raise
return True
| mit | -7,437,390,118,091,616,000 | 23.416667 | 75 | 0.688107 | false |
mefly2012/platform | src/clean_validate/qyxx_hzp_pro_prod_cert.py | 1 | 3642 | # -*- coding: utf-8 -*-
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class qyxx_hzp_pro_prod_cert():
"""中标"""
need_check_ziduan = [
u'bbd_dotime',
u'company_name',
u'location',
u'produce_address',
u'issue_date',
u'validdate',
u'certificate_no',
u'details',
u'bbd_url',
u'province',
u'product_name',
u'bbd_source'
]
def check_bbd_dotime(self, indexstr, ustr):
"""dotime 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
def check_company_name(self, indexstr, ustr):
"""企业名称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_location(self, indexstr, ustr):
"""住所 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_produce_address(self, indexstr, ustr):
"""生产地址 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
return ret
def check_issue_date(self, indexstr, ustr):
"""发证日期 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
else:
ret = u'为空'
return ret
def check_validdate(self, indexstr, ustr):
"""有效期 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
else:
ret = u'为空'
return ret
def check_certificate_no(self, indexstr, ustr):
"""证书编号/许可证编号 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
ret = None
# if not re.compile(u'^XK\d{2}-\d{3} \d{4}$').match(ustr):
# ret = u'不符合格式'
else:
ret = u'为空'
return ret
def check_details(self, indexstr, ustr):
"""明细 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.has_count_hz(ustr, 2):
ret = u'没有2个汉字'
else:
ret = u'为空'
return ret
def check_bbd_url(self, indexstr, ustr):
"""url 清洗验证"""
ret = None
return ret
def check_province(self, indexstr, ustr):
"""省份 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in public.PROVINCE:
ret = u'不是合法省份'
else:
ret = u'为空'
return ret
def check_product_name(self, indexstr, ustr):
"""产品名称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not re.compile(u'^[\u4e00-\u9fa5]{1,}$').match(ustr):
ret = u'不纯汉字'
else:
ret = u'为空'
return ret
def check_bbd_source(self, indexstr, ustr):
"""数据源 清洗验证"""
ret = None
return ret
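# Usage sketch (hypothetical driver code, not part of this module): the cleaning
# pipeline is assumed to call the check_<field> method for every field listed in
# need_check_ziduan and to treat a non-None return value as the validation error.
#
#   checker = qyxx_hzp_pro_prod_cert()
#   err = checker.check_province('0', u'四川')          # None when the value is valid
#   err = checker.check_issue_date('1', u'not-a-date')  # returns an error message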
| apache-2.0 | 4,407,941,176,787,903,000 | 23.529412 | 70 | 0.476019 | false |
slgobinath/SafeEyes | safeeyes/rpc.py | 1 | 3412 | #!/usr/bin/env python
# Safe Eyes is a utility to remind you to take break frequently
# to protect your eyes from eye strain.
# Copyright (C) 2017 Gobinath
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
RPC server and client implementation.
"""
import logging
from threading import Thread
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import ServerProxy
class RPCServer:
"""
    An asynchronous RPC server.
"""
def __init__(self, port, context):
self.__running = False
logging.info('Setting up an RPC server on port %d', port)
self.__server = SimpleXMLRPCServer(("localhost", port), logRequests=False, allow_none=True)
self.__server.register_function(context['api']['show_settings'], 'show_settings')
self.__server.register_function(context['api']['show_about'], 'show_about')
self.__server.register_function(context['api']['enable_safeeyes'], 'enable_safeeyes')
self.__server.register_function(context['api']['disable_safeeyes'], 'disable_safeeyes')
self.__server.register_function(context['api']['take_break'], 'take_break')
self.__server.register_function(context['api']['status'], 'status')
self.__server.register_function(context['api']['quit'], 'quit')
def start(self):
"""
Start the RPC server.
"""
if not self.__running:
self.__running = True
logging.info('Start the RPC server')
server_thread = Thread(target=self.__server.serve_forever)
server_thread.start()
def stop(self):
"""
Stop the server.
"""
if self.__running:
logging.info('Stop the RPC server')
self.__running = False
self.__server.shutdown()
class RPCClient:
"""
An RPC client to communicate with the RPC server.
"""
def __init__(self, port):
self.port = port
self.proxy = ServerProxy('http://localhost:%d/' % self.port, allow_none=True)
def show_settings(self):
"""
Show the settings dialog.
"""
self.proxy.show_settings()
def show_about(self):
"""
Show the about dialog.
"""
self.proxy.show_about()
def enable_safeeyes(self):
"""
Enable Safe Eyes.
"""
self.proxy.enable_safeeyes()
def disable_safeeyes(self):
"""
Disable Safe Eyes.
"""
self.proxy.disable_safeeyes(None)
def take_break(self):
"""
Take a break now.
"""
self.proxy.take_break()
def status(self):
"""
Return the status of Safe Eyes
"""
return self.proxy.status()
def quit(self):
"""
Quit Safe Eyes.
"""
self.proxy.quit()
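# Minimal pairing sketch (hypothetical wiring; the real context['api'] callbacks
# are registered elsewhere in Safe Eyes):
#
#   server = RPCServer(7200, context)
#   server.start()
#   client = RPCClient(7200)
#   client.take_break()   # forwarded to context['api']['take_break'] over XML-RPC
#   client.quit()
#   server.stop()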
| gpl-3.0 | -7,180,652,859,256,321,000 | 29.19469 | 99 | 0.611079 | false |
Kagami/kisa | lib/twisted/internet/test/reactormixins.py | 1 | 8879 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
import signal
from twisted.internet.defer import TimeoutError
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python.runtime import platform
from twisted.python.reflect import namedAny, fullyQualifiedName
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
if platform.isWindows():
process = None
else:
from twisted.internet import process
class ReactorBuilder:
"""
L{TestCase} mixin which provides a reactor-creation API. This mixin
defines C{setUp} and C{tearDown}, so mix it in before L{TestCase} or call
its methods from the overridden ones in the subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
provide or these tests will be skipped. The default, C{None}, means
that no interfaces are required.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
TestCases will be created.
"""
_reactors = [
# Select works everywhere
"twisted.internet.selectreactor.SelectReactor",
]
if platform.isWindows():
# PortableGtkReactor is only really interesting on Windows,
# but not really Windows specific; if you want you can
# temporarily move this up to the all-platforms list to test
# it on other platforms. It's not there in general because
# it's not _really_ worth it to support on other platforms,
# since no one really wants to use it on other platforms.
_reactors.extend([
"twisted.internet.gtk2reactor.PortableGtkReactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor"])
else:
_reactors.extend([
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.kqreactor.KQueueReactor"])
if platform.isMacOSX():
_reactors.append("twisted.internet.cfreactor.CFReactor")
else:
_reactors.extend([
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor"])
reactorFactory = None
originalHandler = None
requiredInterfaces = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if not platform.isWindows():
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker (and any other
# internal readers) should become obsolete when bug #3063 is
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
# error when bug #3063 is fixed, so it should be removed in the same
# branch that fixes it.
#
# -exarkun
reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
reader.connectionLost(None)
reactor._internalReaders.clear()
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except ImportError:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = filter(
lambda required: not required.providedBy(reactor),
self.requiredInterfaces)
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%s does not provide %s" % (
fullyQualifiedName(reactor.__class__),
",".join([fullyQualifiedName(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If C{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TimeoutError: If the reactor is still running after C{timeout}
seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TimeoutError(
"reactor still running after %s seconds" % (timeout,))
def makeTestCaseClasses(cls):
"""
Create a L{TestCase} subclass which mixes in C{cls} for each known
reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
class testcase(cls, TestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
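# Typical usage sketch for a reactor test module: mix ReactorBuilder into a
# builder class holding the test methods, then materialise one TestCase per
# known reactor into the module namespace. The class name below is illustrative.
#
#   class ConnectionTestsBuilder(ReactorBuilder):
#       def test_something(self):
#           reactor = self.buildReactor()
#           ...
#
#   globals().update(ConnectionTestsBuilder.makeTestCaseClasses())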
__all__ = ['ReactorBuilder']
| cc0-1.0 | 1,266,961,347,281,232,600 | 37.271552 | 80 | 0.611105 | false |
sagiss/sardana | src/sardana/taurus/qt/qtgui/extra_macroexecutor/macroparameterseditor/customeditors/senv.py | 1 | 14391 | #!/usr/bin/env python
##############################################################################
##
## This file is part of Sardana
##
## http://www.sardana-controls.org/
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Sardana is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Sardana is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
from taurus.external.qt import Qt
from taurus import Database
from taurus.core.taurusbasetypes import TaurusElementType
from taurus.core.taurusdatabase import TaurusAttrInfo
from taurus.qt.qtgui.input import TaurusAttrListComboBox
from taurus.qt.qtgui.tree import TaurusDbTreeWidget
from taurus.qt.qtgui.resource import getThemeIcon
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.macroparameterseditor import MacroParametersEditor
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.parameditors import LineEditParam, ParamBase, ComboBoxParam, CheckBoxParam, DirPathParam, MSAttrListComboBoxParam
from sardana.taurus.qt.qtgui.extra_macroexecutor.macroparameterseditor.model import ParamEditorModel
from sardana.taurus.qt.qtgui.extra_macroexecutor.common import MSAttrListComboBox
class SenvEditor(Qt.QWidget, MacroParametersEditor):
def __init__(self, parent=None):
Qt.QWidget.__init__(self, parent)
MacroParametersEditor.__init__(self)
self.valueWidget = None
def initComponents(self):
self.setLayout(Qt.QFormLayout())
self.layout().addRow(Qt.QLabel("Setting environment variable:", self))
self.nameComboBox = ComboBoxParam(self)
self.nameComboBox.addItems(["ActiveMntGrp", "ExtraColumns", "JsonRecorder", "ScanFile", "ScanDir"])
self.nameComboBox.setEditable(True)
self.connect(self.nameComboBox, Qt.SIGNAL("currentIndexChanged(int)"), self.onNameComboBoxChanged)
self.layout().addRow("name:", self.nameComboBox)
nameIndex = self.model().index(0, 1, self.rootIndex())
self.nameComboBox.setIndex(nameIndex)
def setRootIndex(self, rootIndex):
self._rootIndex = rootIndex
self.initComponents()
def rootIndex(self):
return self._rootIndex
def model(self):
return self._model
def setModel(self, model):
self._model = model
if isinstance(model, ParamEditorModel):
self.setRootIndex(Qt.QModelIndex())
def onNameComboBoxChanged(self, index):
text = str(self.nameComboBox.currentText())
if self.valueWidget is not None:
label = self.layout().labelForField(self.valueWidget)
if label is not None:
self.layout().removeWidget(label)
label.setParent(None)
label = None
self.layout().removeWidget(self.valueWidget)
self.valueWidget.resetValue()
self.valueWidget.setParent(None)
self.valueWidget = None
self.valueWidget, label = getSenvValueEditor(text, self)
paramRepeatIndex = self.model().index(1, 0, self.rootIndex())
repeatIndex = paramRepeatIndex.child(0, 0)
valueIndex = repeatIndex.child(0, 1)
self.valueWidget.setIndex(valueIndex)
if label:
self.layout().addRow(label, self.valueWidget)
else:
self.layout().addRow(self.valueWidget)
def getSenvValueEditor(envName, parent):
"""Factory method, requires: string, and QWidget as a parent for returned editor.
Factory returns a tuple of widget and a label for it.
:return: (Qt.QWidget, str) """
label = "value:"
if envName == "ActiveMntGrp":
editor = MSAttrListComboBoxParam(parent)
editor.setUseParentModel(True)
editor.setModel("/MeasurementGroupList")
elif envName == "ExtraColumns":
editor = ExtraColumnsEditor(parent)
label = None
elif envName == "JsonRecorder":
editor = CheckBoxParam(parent)
elif envName == "ScanDir":
editor = DirPathParam(parent)
elif envName == "ScanFile":
editor = LineEditParam(parent)
else:
editor = LineEditParam(parent)
return editor, label
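# Usage sketch (hypothetical caller; SenvEditor.onNameComboBoxChanged above does
# essentially this):
#
#   editor, label = getSenvValueEditor('ScanDir', parent_widget)
#   if label:
#       form_layout.addRow(label, editor)
#   else:
#       form_layout.addRow(editor)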
class ExtraColumnsEditor(ParamBase, Qt.QWidget):
def __init__(self, parent=None, paramModel=None):
ParamBase.__init__(self, paramModel)
Qt.QWidget.__init__(self, parent)
self.setLayout(Qt.QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
addNewColumnButton = Qt.QPushButton(getThemeIcon("list-add") , "Add new column...", self)
removeSelectedColumnsButton = Qt.QPushButton(getThemeIcon("list-remove") , "Remove selected...", self)
buttonsLayout = Qt.QHBoxLayout()
buttonsLayout.addWidget(addNewColumnButton)
buttonsLayout.addWidget(removeSelectedColumnsButton)
self.layout().addLayout(buttonsLayout)
self.extraColumnsTable = ExtraColumnsTable(self)
self.extraColumnsModel = ExtraColumnsModel()
self.extraColumnsTable.setModel(self.extraColumnsModel)
self.extraColumnsTable.setItemDelegate(ExtraColumnsDelegate(self.extraColumnsTable))
self.layout().addWidget(self.extraColumnsTable)
self.connect(addNewColumnButton, Qt.SIGNAL("clicked()"), self.onAddNewColumn)
self.connect(removeSelectedColumnsButton, Qt.SIGNAL("clicked()"), self.onRemoveSelectedColumns)
self.connect(self.extraColumnsModel, Qt.SIGNAL("dataChanged (const QModelIndex&,const QModelIndex&)"), self.onExtraColumnsChanged)
self.connect(self.extraColumnsModel, Qt.SIGNAL("modelReset()"), self.onExtraColumnsChanged)
def getValue(self):
return repr(self.extraColumnsTable.model().columns())
def setValue(self, value):
try:
columns = eval(value)
except:
columns = []
self.extraColumnsTable.setColumns(columns)
def onAddNewColumn(self):
self.extraColumnsTable.insertRows()
self.emit(Qt.SIGNAL("modelChanged()"))
def onRemoveSelectedColumns(self):
self.extraColumnsTable.removeRows()
self.emit(Qt.SIGNAL("modelChanged()"))
def onExtraColumnsChanged(self):
self.emit(Qt.SIGNAL("modelChanged()"))
class ExtraColumnsTable(Qt.QTableView):
def __init__(self, parent):
Qt.QTableView.__init__(self, parent)
self.setSelectionBehavior(Qt.QAbstractItemView.SelectRows)
self.setSelectionMode(Qt.QAbstractItemView.ExtendedSelection)
def setColumns(self, columns):
if columns == None: columns = []
self.model().setColumns(columns)
self.resizeColumnsToContents()
def insertRows(self):
self.model().insertRows(self.model().rowCount())
def removeRows(self):
rows = [index.row() for index in self.selectedIndexes()]
rows = list(set(rows))
rows.sort(reverse=True)
for row in rows:
self.model().removeRows(row)
class ExtraColumnsDelegate(Qt.QItemDelegate):
def __init__(self, parent=None):
Qt.QItemDelegate.__init__(self, parent)
db = Database()
self.host = db.getNormalName()
def createEditor(self, parent, option, index):
if index.column() == 1:
self.combo_attr_tree_widget = TaurusDbTreeWidget(perspective=TaurusElementType.Device)
self.combo_attr_tree_widget.setModel(self.host)
treeView = self.combo_attr_tree_widget.treeView()
qmodel = self.combo_attr_tree_widget.getQModel()
editor = Qt.QComboBox(parent)
editor.setModel(qmodel)
editor.setMaxVisibleItems(20)
editor.setView(treeView)
elif index.column() == 2:
editor = MSAttrListComboBox(parent)
editor.setUseParentModel(True)
editor.setModel("/InstrumentList")
else:
editor = Qt.QItemDelegate.createEditor(self, parent, option, index)
return editor
def setEditorData(self, editor, index):
if index.column() == 2:
text = Qt.from_qvariant(index.model().data(index, Qt.Qt.DisplayRole), str)
editor.setCurrentText(text)
else:
Qt.QItemDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
column = index.column()
if column == 1:
selectedItems = self.combo_attr_tree_widget.selectedItems()
if not len(selectedItems) == 1: return
taurusTreeAttributeItem = selectedItems[0]
itemData = taurusTreeAttributeItem.itemData()
if isinstance(itemData, TaurusAttrInfo):
model.setData(index, Qt.QVariant(itemData.fullName()))
elif column == 2:
model.setData(index, Qt.QVariant(editor.currentText()))
else:
Qt.QItemDelegate.setModelData(self, editor, model, index)
def sizeHint(self, option, index):
if index.column() == 0:
fm = option.fontMetrics
text = Qt.from_qvariant(index.model().data(index, Qt.Qt.DisplayRole), str)
document = Qt.QTextDocument()
document.setDefaultFont(option.font)
document.setHtml(text)
size = Qt.QSize(document.idealWidth() + 5, fm.height())
elif index.column() == 1:
editor = self.createEditor(self.parent(), option, index)
if editor is None:
size = Qt.QItemDelegate.sizeHint(self, option, index)
else:
size = editor.sizeHint()
editor.hide()
editor.setParent(None)
# editor.destroy()
else:
size = Qt.QItemDelegate.sizeHint(self, option, index)
return size
class ExtraColumnsModel(Qt.QAbstractTableModel):
def __init__(self, columns=None):
if columns is None: columns = []
Qt.QAbstractItemModel.__init__(self)
self.__columns = columns
def setColumns(self, columns):
self.__columns = columns
self.reset()
def columns(self):
return self.__columns
def rowCount(self, index=Qt.QModelIndex()):
return len(self.__columns)
def columnCount(self, index=Qt.QModelIndex()):
return 3
def data(self, index, role=Qt.Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < self.rowCount()):
return Qt.QVariant()
row = index.row()
column = index.column()
#Display Role
if role == Qt.Qt.DisplayRole:
if column == 0: return Qt.QVariant(Qt.QString(self.__columns[row]['label']))
elif column == 1: return Qt.QVariant(Qt.QString(self.__columns[row]['model']))
elif column == 2: return Qt.QVariant(Qt.QString(self.__columns[row]['instrument']))
return Qt.QVariant()
def headerData(self, section, orientation, role=Qt.Qt.DisplayRole):
if role == Qt.Qt.TextAlignmentRole:
if orientation == Qt.Qt.Horizontal:
return Qt.QVariant(int(Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter))
return Qt.QVariant(int(Qt.Qt.AlignRight | Qt.Qt.AlignVCenter))
if role != Qt.Qt.DisplayRole:
return Qt.QVariant()
#So this is DisplayRole...
if orientation == Qt.Qt.Horizontal:
if section == 0: return Qt.QVariant("Label")
elif section == 1: return Qt.QVariant("Attribute")
elif section == 2: return Qt.QVariant("Instrument")
return Qt.QVariant()
else:
return Qt.QVariant(Qt.QString.number(section + 1))
def flags(self, index):
flags = Qt.Qt.ItemIsEnabled | Qt.Qt.ItemIsSelectable
if index.isValid():
column = index.column()
if column in (0, 1, 2):
flags |= Qt.Qt.ItemIsEditable
return flags
def setData(self, index, value=None, role=Qt.Qt.EditRole):
if index.isValid() and (0 <= index.row() < self.rowCount()):
row = index.row()
column = index.column()
value = Qt.from_qvariant(value, str)
if column == 0: self.__columns[row]['label'] = value
elif column == 1: self.__columns[row]['model'] = value
elif column == 2: self.__columns[row]['instrument'] = value
self.emit(Qt.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
return True
return False
def insertRows(self, row, rows=1, parentindex=None):
if parentindex is None: parentindex = Qt.QModelIndex()
first = row
last = row + rows - 1
self.beginInsertRows(parentindex, first, last)
for row in range(first, last + 1):
self.insertRow(row)
self.endInsertRows()
return True
def insertRow(self, row, parentIndex=None):
self.__columns.insert(row, {'label':'', 'model':'', 'instrument':''})
def removeRows(self, row, rows=1, parentindex=None):
if parentindex is None: parentindex = Qt.QModelIndex()
first = row
last = row + rows - 1
self.beginRemoveRows(parentindex, first, last)
for row in range(first, last + 1):
self.removeRow(row)
self.endRemoveRows()
return True
def removeRow(self, row, parentIndex=None):
self.__columns.pop(row)
CUSTOM_EDITOR = SenvEditor
if __name__ == "__main__":
import sys
import taurus
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(sys.argv)
args = app.get_command_line_args()
editor = SenvEditor()
macroServer = taurus.Device(args[0])
macroInfoObj = macroServer.getMacroInfoObj("senv")
    from sardana.taurus.core.tango.sardana.macro import MacroNode  # assumed import path for MacroNode
    macroNode = MacroNode()
editor.setMacroNode(macroNode)
editor.show()
sys.exit(app.exec_())
| lgpl-3.0 | 7,709,384,933,967,993,000 | 37.47861 | 184 | 0.637968 | false |
openvsip/openvsip | support/QMTest/database.py | 1 | 17953 | #
# Copyright (c) 2009 CodeSourcery
# Copyright (c) 2013 Stefan Seefeld
# All rights reserved.
#
# This file is part of OpenVSIP. It is made available under the
# license contained in the accompanying LICENSE.GPL file.
import os
import qm
from qm.fields import *
from qm.executable import *
from qm.extension import parse_descriptor
import qm.test.base
from qm.test.base import get_extension_class
from qm.test.test import *
from qm.test.resource import Resource
from qm.test.database import get_database
from qm.test.database import NoSuchTestError, NoSuchSuiteError
from qm.test.parameter_database import ParameterDatabase
from qm.test.classes.explicit_suite import ExplicitSuite
from qm.test.classes.compilation_test import ExecutableTest
from qm.test.classes.compilation_test_database import CompilationTestDatabase
from qm.test.classes.compilation_test_database import CompilationTest as CTBase
from qm.test.classes.compiler_test import CompilerTest
from qm.test.classes.compiler_table import CompilerTable
from qm.test.classes.python import ExecTest as PythonExecTest
from qm.test.directory_suite import DirectorySuite
from qm.test.classes.command_host import CommandHost
from remote_host import RemoteHost
import dircache
########################################################################
# Classes
########################################################################
def _get_host(context, variable):
"""Get a host instance according to a particular context variable.
Return a default 'LocalHost' host if the variable is undefined.
'context' -- The context to read the host descriptor from.
'variable' -- The name to which the host descriptor is bound.
returns -- A Host instance.
"""
# This function is cloned from qm.test.classes.compilation_test !
target_desc = context.get(variable)
if target_desc is None:
target = LocalHost({})
else:
f = lambda n: get_extension_class(n, "host", get_database())
host_class, arguments = parse_descriptor(target_desc.strip(), f)
target = host_class(arguments)
return target
class CompilationTest(CTBase):
execute = BooleanField(computed="true")
def Run(self, context, result):
"""Determine from the context whether or not to execute
the compiled executable."""
self.execute = context.GetBoolean("CompilationTest.execute", True)
super(CompilationTest, self).Run(context, result)
def _GetEnvironment(self, context):
env = os.environ.copy()
paths = ':'.join(context.get('CompilerTest.library_dirs', '').split())
if paths:
ld_library_path = paths
alf_library_path = '.:' + paths
if 'LD_LIBRARY_PATH' in env:
ld_library_path += ':' + env['LD_LIBRARY_PATH']
env['LD_LIBRARY_PATH'] = ld_library_path
return env
def _GetTarget(self, context):
"""Return a target instance suitable to run the test executable.
The choice of implementations is determined from the current context.
The global context may have provided a 'CompilationTest.target' variable
to indicate the Host subclass to use. In addition, we consider the
value of the 'ParallelActivator.num_processors' variable that may have
been set by the ParallelActivator resource.
If a value of num_processors > 1 is requested, we attempt to run the
executable using a command determined from the 'par_service.run'
variable. (This is typically 'mpirun'.)
If the requested target is LocalHost, we replace it
by CommandHost (which was designed precisely for such use-cases).
If it is a CommandHost or any other host instance that is known
to support injecting commands, we attempt to modify its arguments
appropriately.
Requesting num_processors > 1 with an unsupported host is an error
and yields an exception."""
# We can't use a regular import for LocalHost, as that wouldn't
# match the type found via 'get_extension_class'.
local_host = get_extension_class('local_host.LocalHost', 'host', get_database())
target_desc = context.get('CompilationTest.target')
if target_desc is None:
host_class, arguments = local_host, {}
else:
f = lambda n: get_extension_class(n, 'host', get_database())
host_class, arguments = parse_descriptor(target_desc.strip(), f)
num_processors = context.get('ParallelActivator.num_processors', 1)
if num_processors > 1:
if host_class is local_host:
host_class = CommandHost
arguments['command'] = context.get('par_service.run')
arguments['command_args'] = ['-np', str(num_processors)]
elif host_class is CommandHost:
# Assume that the command is required to invoke any
# executable on this machine. Prepend the mpirun command
# simply as an argument in that case.
arguments['command_args'] = [context.get('par_service.run'),
'-np', str(num_processors)]
elif host_class is RemoteHost:
pass # ...
else:
raise Exception('target "%s" does not support parallel execution'%target_desc)
path = context.get('par_service.run')
return host_class(arguments)
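    # Illustrative context values consumed by _GetTarget (examples only, not
    # project defaults): to run a test under MPI on the local machine one might set
    #
    #   CompilationTest.target           -> 'command_host.CommandHost(command="mpirun")'
    #   par_service.run                  -> 'mpirun'
    #   ParallelActivator.num_processors -> 4   (set by the ParallelActivator resource)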
def _CheckOutput(self, context, result, prefix, output, diagnostics):
"""Determine from the context whether or not to treat warnings
as errors."""
if output:
result[prefix + "output"] = result.Quote(output)
check_warnings = context.GetBoolean("CompilationTest.check_warnings", False)
if not output or not check_warnings:
return True
lang = self.language
compiler = context['CompilerTable.compilers'][lang]
errors_occured = False
diagnostics = compiler.ParseOutput(output)
for d in diagnostics:
# We only check for warnings, since errors are already dealt with
# elsewhere.
if d.severity == 'warning':
errors_occured = True
result.Fail("The compiler issued an un-expected warning.")
return not errors_occured
class CompiledResource(Resource):
"""A CompiledResource fetches compilation parameters from environment
variables CPPFLAGS, <lang>_options, and <lang>_ldflags in addition
to the CompilerTable-related parameters."""
options = SetField(TextField(), computed="true")
ldflags = SetField(TextField(), computed="true")
source_files = SetField(TextField(), computed="true")
executable = TextField(computed="true")
language = TextField()
def SetUp(self, context, result):
self._context = context
self._compiler = CTBase({'options':self.options,
'ldflags':self.ldflags,
'source_files':self.source_files,
'executable':self.executable,
'language':self.language,
'execute':False},
qmtest_id = self.GetId(),
qmtest_database = self.GetDatabase())
self._compiler.Run(context, result)
directory = self._compiler._GetDirectory(context)
self._executable = os.path.join(directory, self.executable)
context['CompiledResource.executable'] = self._executable
def CleanUp(self, result):
self._compiler._RemoveDirectory(self._context, result)
class DataTest(Test):
"""A DataTest runs an executable from a CompiledResource, with a data-file as input.
"""
    data_file = TextField(description="Data file to pass to the executable.")
def Run(self, context, result):
executable = context['CompiledResource.executable']
host = _get_host(context, 'CompilationTest.target')
env = os.environ.copy()
paths = ':'.join(context.get('CompilerTest.library_dirs', '').split())
if paths:
ld_library_path = paths
if 'LD_LIBRARY_PATH' in env:
ld_library_path += ':' + env['LD_LIBRARY_PATH']
env['LD_LIBRARY_PATH'] = ld_library_path
remote_data_file = os.path.basename(self.data_file)
host.UploadFile(self.data_file, remote_data_file)
status, output = host.UploadAndRun(executable, [remote_data_file],
environment = env)
host.DeleteFile(remote_data_file)
if not result.CheckExitStatus('DataTest.', 'Program', status):
result.Fail('Unexpected exit_code')
if output:
result['DataTest.output'] = result.Quote(output)
class ParallelService(Resource):
def SetUp(self, context, result):
setup = Executable()
command = []
self.halt_command = []
command = context.get('par_service.boot', '').split()
self.halt_command = context.get('par_service.halt', '').split()
if command:
status = setup.Run(command)
result.CheckExitStatus('ParallelService', ' '.join(command), status)
def CleanUp(self, result):
if self.halt_command:
command = self.halt_command
cleanup = Executable()
status = cleanup.Run(command)
result.CheckExitStatus('ParallelService', ' '.join(command), status)
class ParallelActivator(Resource):
"""This resource defines the 'ParallelActivator.use_num_processors'
context variable to indicate that any dependent test should be run in parallel."""
def SetUp(self, context, result):
num_processors = context.get('par_service.num_processors')
if num_processors:
context['ParallelActivator.num_processors'] = int(num_processors)
class Database(CompilationTestDatabase):
"""'Database' stores the OpenVSIP test and benchmark suites.
In addition to the CompilationTestDatabase behavior, we must:
* make all tests depend on the ParallelService resource
* add special handling for directories containing 'data/' subdirs.
"""
no_exclusions = BooleanField()
flags = DictionaryField(TextField(), TextField())
excluded_subdirs = SetField(TextField(),
default_value = ['QMTest', 'data', 'build'],
description="Subdirectories not to scan for tests.",
computed="true")
def __init__(self, *args, **kwds):
super(Database, self).__init__(*args, **kwds)
self.test_extensions['.py'] = 'python'
if self.no_exclusions == 'false':
if self.flags.get('have_mpi') != '1':
self.excluded_subdirs.append('mpi')
self.excluded_subdirs.append('parallel')
self.excluded_subdirs.append('regressions/parallel')
if self.flags.get('enable_threading') != '1':
self.excluded_subdirs.append('thread')
if self.flags.get('have_ipp') != '1':
self.excluded_subdirs.append('ipp')
if self.flags.get('have_sal') != '1':
self.excluded_subdirs.append('sal')
if self.flags.get('have_fftw') != '1':
self.excluded_subdirs.append('fftw')
if self.flags.get('have_opencl') != '1':
self.excluded_subdirs.append('opencl')
if self.flags.get('have_cuda') != '1':
self.excluded_subdirs.append('cuda')
if self.flags.get('enable_cvsip_bindings') != 'yes':
self.excluded_subdirs.append('cvsip')
if self.flags.get('enable_python_bindings') not in ('1', 'yes'):
self.excluded_subdirs.append('python')
if self.flags.get('enable_threading') != '1':
self.excluded_subdirs.append('threading')
def GetSubdirectories(self, directory):
subdirs = super(Database, self).GetSubdirectories(directory)
subdirs = [s for s in subdirs
if self.JoinLabels(directory, s) not in self.excluded_subdirs]
return subdirs
def GetIds(self, kind, directory = '', scan_subdirs = 1):
# Directories containing 'data/' subdir are special.
# Everything else is handled by the base class.
dirname = os.path.join(self.srcdir, directory)
if not os.path.isdir(dirname):
raise NoSuchSuiteError, directory
elif os.path.isdir(os.path.join(dirname, 'data')):
if kind == Database.TEST:
return [self.JoinLabels(directory, f)
for f in dircache.listdir(os.path.join(dirname, 'data'))
if f not in self.excluded_subdirs]
else:
return []
else:
return super(Database, self).GetIds(kind, directory, scan_subdirs)
def GetExtension(self, id):
if not id:
return DirectorySuite(self, id)
elif id == 'compiler_table':
return CompilerTable({}, qmtest_id = id, qmtest_database = self)
elif id == 'parallel_service':
return ParallelService({}, qmtest_id = id, qmtest_database = self)
elif id == 'parallel_activator':
return ParallelActivator({}, qmtest_id = id, qmtest_database = self)
resources = ['compiler_table', 'parallel_service']
id_components = self.GetLabelComponents(id)
# 'data' subdirectories have special meaning, and so
# are not allowed as label components.
if 'data' in id_components:
return None
dirname = os.path.join(self.srcdir, *id_components[:-1])
basename = id_components[-1]
file_ext = os.path.splitext(basename)[1]
# If <dirname>/data is an existing directory...
if os.path.isdir(os.path.join(dirname, 'data')):
if file_ext in self.test_extensions:
executable = os.path.splitext(os.path.basename(id))[0]
if sys.platform == 'win32':
executable += '.exe'
# ...<dirname>/<basename> is a resource.
src = os.path.abspath(os.path.join(self.srcdir, id))
return self._MakeTest(id,
CompiledResource,
language=self.test_extensions[file_ext],
source_files=[src],
executable=executable,
resources=resources)
else:
# ...<dirname>/<basename> is a test.
path = os.path.join(dirname, 'data', basename)
if not os.path.isfile(path):
return None
src = [f for f in dircache.listdir(dirname)
if os.path.splitext(f)[1] in self.test_extensions]
# There must be exactly one source file, which
# is our resource.
if len(src) > 1:
raise DatabaseError('multiple source files found in %s'%dirname)
resources.append(self.JoinLabels(*(id_components[:-1] + src)))
return self._MakeTest(id,
DataTest,
resources=resources,
data_file=path)
src = os.path.join(self.srcdir, id)
if file_ext in self.test_extensions and os.path.isfile(src):
if file_ext == '.py':
return self._MakePythonTest(id, src)
else:
executable = os.path.splitext(os.path.basename(id))[0]
if sys.platform == 'win32':
executable += '.exe'
# all tests in parallel/ should be run in parallel.
if id_components[0] in ('mpi', 'parallel'):
resources.append('parallel_activator')
return self._MakeTest(id,
CompilationTest,
language=self.test_extensions[file_ext],
source_files=[src],
executable=executable,
resources=resources)
elif os.path.isfile(src + '.qms'):
qms = src + '.qms'
# Expose the flags to the suite file so it can exclude ids
# the same way the database itself does in the constructor.
context = dict(flags=self.flags,
excluded_subdirs=self.excluded_subdirs)
try:
content = open(qms).read()
exec content in context
except:
print 'Error parsing', qms
test_ids=context.get('test_ids', [])
suite_ids=context.get('suite_ids', [])
return ExplicitSuite(is_implicit=False,
test_ids=test_ids, suite_ids=suite_ids,
qmtest_id = id, qmtest_database = self)
elif os.path.isdir(src):
if not basename in self.excluded_subdirs:
return DirectorySuite(self, id)
else:
return None
def _MakeTest(self, id, class_, **args):
return class_(args, qmtest_id = id, qmtest_database = self)
def _MakePythonTest(self, id, src):
source = '\n'.join(open(src, 'r').readlines())
return PythonExecTest(source=source, qmtest_id = id, qmtest_database = self)
| gpl-2.0 | -3,170,306,366,714,759,000 | 38.631347 | 94 | 0.57929 | false |
quaddra/engage | python_pkg/engage/drivers/standard/memcached__1_4/driver.py | 1 | 7343 | """Service manager for memcached
"""
import os
import os.path
import shutil
import sys
import time
# fix path if necessary (if running from source or running as test)
try:
import engage.utils
except:
sys.exc_clear()
dir_to_add_to_python_path = os.path.abspath((os.path.join(os.path.dirname(__file__), "../../../..")))
sys.path.append(dir_to_add_to_python_path)
import engage.drivers.service_manager as service_manager
import engage.drivers.resource_metadata as resource_metadata
import engage.utils.path as iupath
import engage_utils.process as iuprocess
import engage.utils.http as iuhttp
import engage.utils.log_setup
import engage.utils.file as iufile
import engage.utils.timeout as iutimeout
import engage.drivers.utils
from engage.drivers.password_repo_mixin import PasswordRepoMixin
from engage.drivers.action import *
import engage.drivers.genforma.macports_pkg as macports_pkg
import engage.drivers.genforma.aptget as aptget
logger = engage.utils.log_setup.setup_script_logger(__name__)
from engage.utils.user_error import ScriptErrInf, UserError
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
global errors
error_info = ScriptErrInf(__name__, error_code, msg)
errors[error_info.error_code] = error_info
# error codes
ERR_MEMCACHED_BUILD_FAILED = 1
ERR_MEMCACHED_NO_INSTALL_DIR = 2
ERR_MEMCACHED_NO_EXEC_FOUND = 3
ERR_MEMCACHED_START_FAILED = 4
ERR_MEMCACHED_STOP_FAILED = 5
ERR_MEMCACHED_EXITED = 6
ERR_MEMCACHED_UNKNOWN_OSTYPE = 7
define_error(ERR_MEMCACHED_BUILD_FAILED,
_("Memcached build failed"))
define_error(ERR_MEMCACHED_NO_INSTALL_DIR,
_("Post install check failed: missing installation directory '%(dir)s'"))
define_error(ERR_MEMCACHED_NO_EXEC_FOUND,
_("Post install check failed: missing executable in directory '%(dir)s'"))
define_error(ERR_MEMCACHED_START_FAILED,
_("Memcached daemon execution failed in resource %(id)s"))
define_error(ERR_MEMCACHED_STOP_FAILED,
_("Memcached daemon stop failed"))
define_error(ERR_MEMCACHED_EXITED,
_("Memcached daemon appears to have exited after startup"))
define_error(ERR_MEMCACHED_UNKNOWN_OSTYPE,
_("Installation on unknown os type %(ostype)s"))
def get_packages_filename():
return engage.drivers.utils.get_packages_filename(__file__)
def make_context(resource_json, sudo_password_fn, dry_run=False):
ctx = Context(resource_json, logger, __file__,
sudo_password_fn=sudo_password_fn,
dry_run=dry_run)
ctx.check_port("input_ports.host",
os_type=str,
log_directory=str,
sudo_password=str)
ctx.check_port("output_ports.cache",
host=str,
port=int,
provider=str,
home=str)
if ctx.props.input_ports.host.os_type == 'linux':
ctx.add("memcached_exe", "/usr/bin/memcached")
# we stick the linux pid file where it would go if memcached
# is started by the os. This handles the case where the
# server is rebooted and we want to see if memcached is running.
ctx.add("pidfile", os.path.join("/var/run/memcached.pid"))
elif ctx.props.input_ports.host.os_type == 'mac-osx':
ctx.add("memcached_exe", "/opt/local/bin/memcached")
# this is hack: we should really have separate drivers for macports
# and aptget
ctx.add("input_ports.macports.macports_exe", "/opt/local/bin/port")
ctx.add("pidfile", os.path.join(ctx.props.output_ports.cache.home, "memcached.pid"))
else:
        raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE],
                        msg_args={'ostype': ctx.props.input_ports.host.os_type})
ctx.add("logfile", os.path.join(ctx.props.input_ports.host.log_directory, "memcached.log"))
ctx.add("memsize", 64)
return ctx
@make_action
def start_memcached(self):
"""We start memcached as a daemon process. The pidfile is created
by memcached.
"""
p = self.ctx.props
memcached_args = [p.memcached_exe, "-d", "-P", p.pidfile,
"-m", str(p.memsize)]
if os.geteuid()==0:
memcached_args.extend(["-u", "root"])
    rc = iuprocess.run_and_log_program(memcached_args,
None, self.ctx.logger)
if rc != 0:
raise UserError(errors[ERR_MEMCACHED_START_FAILED],
msg_args={"id":p.id},
developer_msg="rc was %d" % rc)
self.ctx.logger.debug("memcached daemon started successfully")
class Manager(service_manager.Manager, PasswordRepoMixin):
def __init__(self, metadata, dry_run=False):
package_name = "%s %s" % (metadata.key["name"],
metadata.key["version"])
service_manager.Manager.__init__(self, metadata, package_name)
self.ctx = make_context(metadata.to_json(),
sudo_password_fn=self._get_sudo_password,
dry_run=dry_run)
def validate_pre_install(self):
pass
def get_pid_file_path(self):
return self.ctx.props.pidfile
def install(self, package):
r = self.ctx.r
p = self.ctx.props
home_path = p.output_ports.cache.home
# on linux, use apt-get
if p.input_ports.host.os_type == 'linux':
# use apt-get
r(aptget.install, ['memcached'])
elif p.input_ports.host.os_type == 'mac-osx':
# otherwise install using macports
r(macports_pkg.port_install, ['memcached'])
else:
            raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE],
                            msg_args={'ostype': p.input_ports.host.os_type})
# home_path used for pidfile
r(ensure_dir_exists, home_path)
self.validate_post_install()
def is_installed(self):
p = self.ctx.props
rv = self.ctx.rv
if not os.path.exists(p.output_ports.cache.home):
return False
if p.input_ports.host.os_type == 'linux':
return rv(aptget.is_pkg_installed, 'memcached')
elif p.input_ports.host.os_type == 'mac-osx':
return rv(macports_pkg.is_installed, "memcached")
else:
            raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE],
                            msg_args={'ostype': p.input_ports.host.os_type})
def validate_post_install(self):
r = self.ctx.r
p = self.ctx.props
home_path = p.output_ports.cache.home
r(check_dir_exists, home_path)
if p.input_ports.host.os_type == 'linux':
r(aptget.check_installed, "memcached")
elif p.input_ports.host.os_type == 'mac-osx':
r(macports_pkg.check_installed, "memcached")
else:
            raise UserError(errors[ERR_MEMCACHED_UNKNOWN_OSTYPE],
                            msg_args={'ostype': p.input_ports.host.os_type})
def start(self):
p = self.ctx.props
self.ctx.r(start_memcached)
# make sure that it is up
self.ctx.poll_rv(10, 1.0, lambda x: x, get_server_status,
p.pidfile)
def is_running(self):
return self.ctx.rv(get_server_status, self.ctx.props.pidfile)!=None
def stop(self):
r = self.ctx.r
p = self.ctx.props
r(stop_server, p.pidfile, force_stop=True, timeout_tries=20)
| apache-2.0 | 4,515,107,118,665,952,000 | 37.244792 | 105 | 0.625221 | false |
mhan-team/service | mhan_service/followme/admin.py | 1 | 1235 | """
The MIT License (MIT)
Copyright (c) 2016 mhan-team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.contrib import admin
from .models import Trace, Point
# Register your models here.
admin.site.register(Trace)
admin.site.register(Point)
| mit | -594,035,834,307,902,700 | 40.166667 | 78 | 0.796761 | false |
enricopesce/ec2_snap | ec2_snap_exec.py | 1 | 3106 | import logging
import boto3
import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
simulate = False
def get_instance_tag(id, tag_name):
res_ec2 = boto3.resource('ec2')
tags = res_ec2.Instance(id).tags
if tags is not None:
for tag in tags:
if tag['Key'] == tag_name:
return tag['Value']
return id
def get_volume_tag(id, tag_name):
res_ec2 = boto3.resource('ec2')
tags = res_ec2.Volume(id).tags
if tags is not None:
for tag in tags:
if tag['Key'] == tag_name:
return tag['Value']
return id
def snapshots_by_instance(instance, delete_date, mode):
devices = instance.block_device_mappings
inst_id = instance.instance_id
inst_name = get_instance_tag(inst_id, "Name")
mode_type = "HOT-SNAPSHOT"
try:
if mode == 'cold':
res_instance = boto3.resource('ec2').Instance(inst_id)
res_instance.stop(DryRun=simulate)
logging.info("Stopping instance %s" % inst_name)
res_instance.wait_until_stopped()
logging.info("Stopped instance %s" % inst_name)
mode_type = "COLD-SNAPSHOT"
for dev in devices:
if dev.get('Ebs', None) is None:
continue
vol_id = dev['Ebs']['VolumeId']
vol_name = get_volume_tag(vol_id, "Name")
dev_name = dev['DeviceName']
volume = boto3.resource('ec2').Volume(vol_id)
logging.info("Snapshotting instance %s (%s) mode %s device %s" % (inst_id, inst_name, mode_type, dev_name))
res_snap = volume.create_snapshot(DryRun=simulate)
res_snap.create_tags(DryRun=simulate, Tags=[{'Key': 'Name', 'Value': vol_name},
{'Key': 'DeviceName', 'Value': dev_name},
{'Key': 'InstanceName', 'Value': inst_name},
{'Key': 'VolumeID', 'Value': vol_id},
{'Key': 'SnapMode', 'Value': mode_type},
{'Key': 'DeleteOn', 'Value': delete_date}])
logging.info("Snapshots finished")
if mode == "cold":
logging.info("Starting instance %s %s" % (inst_id, inst_name))
res_instance.start(DryRun=simulate)
except Exception as e:
logging.error("Unexpected error: %s" % e)
return
#lambda call
def ec2_snap_exec(event, context):
try:
days = int(event['retention'])
instance = boto3.resource('ec2').Instance(event['instance_id'])
delete_date = datetime.date.today() + datetime.timedelta(days=days)
mode = event['mode']
except Exception as e:
logging.error("Unexpected error: %s" % e)
else:
snapshots_by_instance(instance, delete_date.strftime('%Y-%m-%d'), mode)
return
params = {'instance_id': 'i-a44d9064', 'retention': '15', 'mode': 'hot'}
print params
ec2_snap_exec(params, '')
| gpl-3.0 | 1,819,731,942,005,402,600 | 34.295455 | 119 | 0.538635 | false |
iLoveTux/unitils | setup.py | 1 | 1185 | import sys
from setuptools import setup
tests_require = ["nose>=1.0"]
if sys.version_info < (3,0):
tests_require = ["nose>=1.0", "mock"]
setup(
name="unitils",
version="0.1.2",
author="iLoveTux",
author_email="[email protected]",
description="Cross platform utilities I have found to be incredibly useful",
license="GPLv3",
keywords="utility tools cli",
url="http://github.com/ilovetux/unitils",
packages=['unitils'],
install_requires=["colorama"],
entry_points={
"console_scripts": [
"cat.py=unitils.cli:cat",
"cp.py=unitils.cli:cp",
"find.py=unitils.cli:find",
"grep.py=unitils.cli:grep",
"head.py=unitils.cli:head",
"ls.py=unitils.cli:ls",
"mv.py=unitils.cli:mv",
"watch.py=unitils.cli:watch",
"wc.py=unitils.cli:wc",
"which.py=unitils.cli:which",
]
},
test_suite="nose.collector",
tests_require=tests_require,
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
],
)
| gpl-3.0 | 460,417,516,272,238,600 | 28.625 | 80 | 0.56962 | false |
dianchang/flask-debugtoolbar | flask_debugtoolbar/panels/routes.py | 1 | 1876 | from collections import OrderedDict
from flask_debugtoolbar.panels import DebugPanel
from flask import current_app
_ = lambda x: x
class RoutesDebugPanel(DebugPanel):
"""
A panel to display Flask app routes.
"""
name = 'Routes'
has_content = True
def nav_title(self):
return _('Routes')
def title(self):
return _('Routes')
def url(self):
return ''
def process_request(self, request):
pass
def content(self):
context = self.context.copy()
blueprints = {}
raw_endpoints = {}
for endpoint, _rules in current_app.url_map._rules_by_endpoint.iteritems():
if any(item in endpoint for item in ['_debug_toolbar', 'debugtoolbar', 'static']):
continue
for rule in _rules:
rule.methods = sorted(filter(lambda x: x not in ['HEAD', 'OPTIONS'], rule.methods))
if '.' in endpoint:
blueprint_name = endpoint.split('.')[0]
if not blueprint_name in blueprints:
blueprints[blueprint_name] = {}
blueprints[blueprint_name][endpoint] = _rules
else:
raw_endpoints[endpoint] = _rules
# Reorder
blueprints = OrderedDict(sorted(blueprints.iteritems()))
for key in blueprints.keys():
blueprints[key] = OrderedDict(sorted(blueprints[key].iteritems()))
raw_endpoints = OrderedDict(sorted(raw_endpoints.iteritems()))
context.update({
'blueprints': blueprints,
'raw_endpoints': raw_endpoints
})
return self.render('panels/routes.html', context)
def remove_http_methods(rules):
"""Do not show HEAD, OPTION methods."""
for rule in rules:
rule.methods = sorted(filter(lambda x: x not in ['HEAD', 'OPTIONS'], rule.methods))
| bsd-3-clause | -7,090,675,488,362,132,000 | 30.266667 | 99 | 0.585288 | false |
google-research/google-research | poem/cv_mim/train.py | 1 | 1327 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose representation training with TFRecord inputs."""
from absl import app
from absl import flags
import tensorflow as tf
from poem.core import common
from poem.core import input_generator
from poem.core import keypoint_profiles
from poem.core import tfe_input_layer
from poem.cv_mim import train_base
FLAGS = flags.FLAGS
flags.adopt_module_key_flags(train_base)
def main(_):
train_base.run(
input_dataset_class=tf.data.TFRecordDataset,
common_module=common,
keypoint_profiles_module=keypoint_profiles,
input_example_parser_creator=tfe_input_layer.create_tfe_parser,
keypoint_preprocessor_3d=input_generator.preprocess_keypoints_3d)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | 8,310,518,479,641,039,000 | 29.860465 | 74 | 0.75584 | false |
same-moon/smutils | windows/juggler/pyhook/hookit.py | 1 | 9077 | import os, collections, threading, Queue, time
import win32api
import win32com.client
import pythoncom, pyHook
"""
MessageName: key down
Message: 256
Time: 112416317
Window: 197094
WindowName: Emacs/Python <ruttbe@LAGER> hookit.py
Ascii: 120 x
Key: X
KeyID: 88
ScanCode: 45
Extended: 0
Injected: 0
Alt 0
Transition 0
"""
# Globals
last_window = None
bufs = { 'Emacs': { 'active_window' : None,
'buf': [] },
'VS': { 'active_window' : None,
'buf': [] } }
valid_abbrev_chars = [chr(x) for x in range(ord('0'), ord('0') + 10)]
valid_abbrev_chars += [chr(x) for x in range(ord('A'), ord('A') + 26)]
valid_abbrev_chars += [chr(x) for x in range(ord('a'), ord('a') + 26)]
shell = win32com.client.Dispatch("WScript.Shell")
JUGGLER_DEFNS = os.getenv("JUGGLER_DEFNS")
JUGGLER_AUTOHOTKEY_SCRIPT = os.getenv("JUGGLER_AUTOHOTKEY_SCRIPT")
assert JUGGLER_DEFNS
assert JUGGLER_AUTOHOTKEY_SCRIPT
langs = 'global python javascript'.split()
expanding_now = False
# map from lang to abbrev to text
defns = collections.defaultdict(dict)
class CommandThread(threading.Thread):
def __init__(self, cmd):
threading.Thread.__init__(self)
self.cmd = cmd
def run(self):
(status, output) = commands.getstatusoutput(self.cmd)
print 'CommandThread: (status, output) of [%s] is (%d, %s)' % \
(self.cmd, status, output)
class HelperThread(threading.Thread):
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
def run(self):
while True:
item = q.get()
if item is None:
break
action, arg = item
if action == 'SendKeys':
time.sleep(1)
shell.SendKeys(arg)
q = Queue.Queue()
helper = HelperThread(q)
helper.setDaemon(True)
helper.start()
def process_lang(lang):
for fn in os.listdir(os.path.join(JUGGLER_DEFNS, lang)):
fn2 = os.path.join(JUGGLER_DEFNS, lang, fn)
with open(fn2) as f:
txt = f.read()
defns[lang][fn] = txt
print defns
def sendkeys(keystrokes):
print 'sendkeys(%s) called' % (repr(keystrokes))
shell.SendKeys(keystrokes)
# see http://ss64.com/vb/sendkeys.html or better yet https://msdn.microsoft.com/en-us/library/aa266279%28v=vs.60%29.aspx
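# SendKeys sequences used below, for reference (syntax per the links above):
#   "{BACKSPACE}", "{ENTER}", "{Left}", "{Right}"  - named keys
#   "^o" - Ctrl+o    "%x" - Alt+x
#   literal +, ^, %, ~, ( and ) must be wrapped in braces, e.g. "{+}"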
def executeAbbrevEmacs(lang, abbrev):
global expanding_now
if lang in defns:
if abbrev in defns[lang]:
assert not expanding_now
expanding_now = True
replacement = defns[lang][abbrev]
sendkeys("{BACKSPACE}" * len(abbrev))
replacements = []
lastChar = None
for char in replacement:
if char == '\n':
if len(replacements) > 0 and replacements[-1] == '\r':
replacements[-1] = '\r\n'
else:
replacements.append('\n')
elif char == 'l' and lastChar == '\\':
replacements[-1] = '\\l'
elif char == 'r' and lastChar == '\\':
replacements[-1] = '\\r'
elif char == '>' and len(replacements) >= 9 and replacements[-9:] == ['<','e','n','d','p','o','i','n','t']:
replacements[-9:] = ['<endpoint>']
else:
replacements.append(char)
lastChar = char
print 'replacements are', replacements
endpointActive = False
for sequence in replacements:
if sequence in ['\n', '\r\n']:
sendkeys("^o^f")
elif sequence == r'\l':
sendkeys("{Left}")
elif sequence == r'\r':
sendkeys("{Right}")
elif sequence == ':':
sendkeys("^q:")
elif sequence in ['{', '}', '[', ']', '+', '^', '%', '~', '(', ')']:
sendkeys("{%s}" % (sequence))
elif sequence == '<endpoint>':
sendkeys("%xjuggler-make-endpoint-marker{ENTER}")
endpointActive = True
else:
sendkeys(sequence)
if endpointActive:
sendkeys("%xjuggler-goto-endpoint-marker{ENTER}")
expanding_now = False
return True
return False
def executeAbbrevVS(lang, abbrev):
# global executing_now
# if lang in defns:
# if abbrev in defns[lang]:
# replacement = defns[lang][abbrev]
# executing_now = True
# shell.SendKeys("{BACKSPACE}" * len(abbrev))
# replacements = []
# for char in replacement:
# if char == '\n':
# if len(replacements) > 0 and replacements[-1] == '\r':
# replacements[-1] = '\r\n'
# else:
# replacements.append('\n')
# else:
# replacements.append(char)
# print 'replacements are', replacements
# for sequence in replacements:
# if sequence in ['\n', '\r\n']:
# shell.SendKeys("{ENTER}")
# else:
# shell.SendKeys(sequence)
# executing_now = False
# return True
return False
"""
x bar7 foo foo foo foo foo foo
bar7
ff
lklkjlkj bar7
bar7
x y z bar7
if foo:
"""
def get_editor(event):
window = event.WindowName
if window.startswith("Emacs/"):
return 'Emacs'
elif 'Microsoft Visual Studio' in window:
return 'VS'
return None
# import win32ui
# wnd = win32ui.GetForegroundWindow()
# print wnd.GetWindowText()
def get_lang(event):
if event.WindowName.startswith("Emacs/") and '<' in event.WindowName:
return event.WindowName.split('Emacs/')[1].split('<')[0].strip().lower()
return None
def try_expand_abbrev(editor, candidate, event):
lang = get_lang(event)
executed = False
cmd = 'executeAbbrev' + editor
execution_cmd = globals()[cmd]
if lang:
executed = execution_cmd(lang, candidate)
print '%s(%s, %s) returned %s' % (cmd, lang, candidate, executed)
if not executed:
        executed = execution_cmd('global', candidate)
        print '%s(%s, %s) in global returned %s' % (cmd, 'global', candidate, executed)
return executed
def intercepted_VS_keys(event):
if event.Ascii == 14: # Ctrl-n
# sendkeys('{DOWN}')
q.put(('SendKeys', "{DOWN}"))
elif event.Ascii == 16: # Ctrl-p
# sendkeys('{UP}')
q.put(('SendKeys', "{UP}"))
else:
return False
return True
def OnKeyUp(event):
print 'key up event:', event
if not q.empty():
try:
item = q.get_nowait()
action, arg = item
if action == "SendKeys":
sendkeys(arg)
except Queue.Empty:
            pass
    # pass the key-up event on to other hook handlers
    return True
def OnKeyDown(event):
if expanding_now:
return True
print 'key down event:', event
editor = get_editor(event)
if not editor:
return True
global last_window, bufs
print bufs
# intercept for VS first
if editor == 'VS':
intercepted = intercepted_VS_keys(event)
if intercepted:
return False
prev_window, buf = bufs[editor]['active_window'], bufs[editor]['buf']
if prev_window is None:
assert buf == []
bufs[editor]['active_window'] = event.Window
elif event.Window != prev_window:
bufs[editor]['active_window'] = event.Window
bufs[editor]['buf'] = []
return True
k = event.Ascii
if chr(k) in valid_abbrev_chars:
buf.append(chr(k))
else:
if chr(k) == ' ':
candidate = ''.join(buf)
executed = try_expand_abbrev(editor, candidate, event)
bufs[editor]['buf'] = []
if executed:
return False # disable other handlers
bufs[editor]['buf'] = []
# print 'MessageName:',event.MessageName
# print 'Message:',event.Message
# print 'Time:',event.Time
# print 'Window:',event.Window
# print 'WindowName:',event.WindowName
print 'Ascii:', event.Ascii, chr(event.Ascii)
# print 'Key:', event.Key
# print 'KeyID:', event.KeyID
# print 'ScanCode:', event.ScanCode
# print 'Extended:', event.Extended
# print 'Injected:', event.Injected
# print 'Alt', event.Alt
# print 'Transition', event.Transition
# print '---'
# return True to pass the event to other handlers
return True
def main():
for lang in os.listdir(JUGGLER_DEFNS):
if lang in langs:
process_lang(lang)
# create a hook manager
hm = pyHook.HookManager()
# watch for all key events
hm.KeyDown = OnKeyDown
hm.KeyUp = OnKeyUp
# set the hook
hm.HookKeyboard()
# wait forever
pythoncom.PumpMessages()
if __name__ == "__main__": # when run as a script
main()
| mit | 169,636,825,907,364,860 | 30.19244 | 123 | 0.539495 | false |
YiqunPeng/Leetcode-pyq | solutions/298BinaryTreeLongestConsecutiveSequence.py | 1 | 1902 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# dfs, post order, no global variable
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(node, ans):
if not node:
return 0, 0
if not node.left and not node.right:
return 1, max(ans, 1)
res = 1
left, la = dfs(node.left, ans)
right, ra = dfs(node.right, ans)
if node.left and node.left.val == node.val + 1:
res = max(left + 1, res)
if node.right and node.right.val == node.val + 1:
res = max(right + 1, res)
return res, max(ans, la, ra, res)
return dfs(root, 0)[1]
# dfs, post order traverse, use global variable
# def __init__(self):
# self.ans = 0
# def longestConsecutive(self, root):
# """
# :type root: TreeNode
# :rtype: int
# """
# def dfs(node):
# if not node:
# return 0
# if not node.left and not node.right:
# self.ans = max(self.ans, 1)
# return 1
# res = 1
# left = dfs(node.left)
# right = dfs(node.right)
# if node.left and node.left.val == node.val + 1:
# res = max(left + 1, res)
# if node.right and node.right.val == node.val + 1:
# res = max(right + 1, res)
# self.ans = max(self.ans, res)
# return res
# dfs(root)
# return self.ans
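    # Example (hedged sketch, assuming LeetCode's TreeNode class is available):
    # for the tree 1 -> (3, 2), 3 -> (2, 4), 4 -> 5 the longest consecutive
    # path is 3-4-5, so the expected answer is 3:
    #   root = TreeNode(1); root.left = TreeNode(3); root.right = TreeNode(2)
    #   root.left.left = TreeNode(2); root.left.right = TreeNode(4)
    #   root.left.right.right = TreeNode(5)
    #   print Solution().longestConsecutive(root)  # -> 3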
| gpl-3.0 | -9,160,639,779,577,161,000 | 27.402985 | 63 | 0.419558 | false |
UCL-dataspring/cluster-code | bluclobber/model/corpus.py | 1 | 2359 | import glob
import os
import traceback
from book import Book
from archive import Archive
from functools import reduce
from dataset import DataSet
from ..harness.mapreduce import MapReduce
from ..harness.utils import merge
from ..harness.decomposer import Decomposer
import logging
class Corpus(DataSet):
def __init__(self, path=None, communicator=None):
if type(path)==str:
path+='/*.zip'
        super(Corpus, self).__init__(Archive, path, communicator)
def analyse_by_book_in_archives(self, mapper, reducer, subsample=1, shuffler=None):
partition=Corpus(Decomposer(self.paths, self.communicator))
harness=MapReduce(self.loadingMap(mapper), reducer, self.communicator,
prepartitioned=True, subsample=subsample, shuffler=shuffler )
return harness.execute(partition)
def analyse_by_book(self, mapper, reducer, subsample=1, shuffler=None):
harness = MapReduce(self.loadingMap(mapper), reducer, self.communicator, subsample, shuffler=shuffler)
return harness.execute(self)
def analyse(self,mapper, reducer, subsample=1, bybook=False, shuffler=None):
if bybook:
self.logger.info("Analysing by book")
return self.analyse_by_book_in_archives(mapper, reducer, subsample, shuffler)
else:
self.logger.info("Analysing by archive")
return self.analyse_by_file(self.loadingMap(mapper), reducer, subsample, shuffler)
def loadingMap(self, mapper):
def _map(book):
self.logger.debug("Loading book")
try:
book.load()
except Exception as exception:
self.logger.warn("Problem loading " + book.code + " in " + book.archive.path)
self.logger.warn(traceback.format_exc())
self.logger.warn(str(exception))
self.logger.debug("Loaded book")
try:
self.logger.debug("Considering book")
result= mapper(book)
self.logger.debug("Considered book")
return result
except Exception as exception:
self.logger.warn("Problem parsing " + book.code + " in " + book.archive.path)
self.logger.warn(traceback.format_exc())
self.logger.warn(str(exception))
return _map
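# Example usage (illustrative sketch; the data path, mapper and reducer are
# hypothetical and not part of this module):
#   corpus = Corpus('/data/bl_books')            # globs /data/bl_books/*.zip
#   n_books = corpus.analyse(mapper=lambda book: 1,
#                            reducer=lambda a, b: a + b,
#                            subsample=10, bybook=True)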
| mit | 1,672,840,677,074,940,200 | 38.316667 | 110 | 0.635863 | false |
tovrstra/sympy | sympy/printing/tests/test_repr.py | 1 | 2767 | from sympy.utilities.pytest import XFAIL
from sympy import Symbol, symbols, Function, Integer, Matrix, nan, oo, abs, \
Rational, Real, S, WildFunction
from sympy.geometry import Point, Circle, Ellipse
from sympy.printing import srepr
x, y = symbols('xy')
# eval(srepr(expr)) == expr has to succeed in the right environment. The right
# environment is the scope of "from sympy import *" for most cases.
ENV = {}
exec "from sympy import *" in ENV
# These classes have to be added separately:
ENV["Infinity"] = S.Infinity
ENV["NegativeInfinity"] = S.NegativeInfinity
ENV["NegativeOne"] = S.NegativeOne
ENV["One"] = S.One
ENV["Zero"] = S.Zero
def sT(expr, string):
"""
sT := sreprTest
Tests that srepr delivers the expected string and that
the condition eval(srepr(expr))==expr holds.
"""
assert srepr(expr) == string
assert eval(string, ENV) == expr
def test_printmethod():
class R(oo.__class__):
def _sympyrepr_(self, printer):
return "foo"
assert srepr(R()) == "foo"
class R(abs):
def _sympyrepr_(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert srepr(R(x)) == "foo(Symbol('x'))"
def test_Add():
sT(x+y, "Add(Symbol('x'), Symbol('y'))")
def test_Function():
sT(Function("f")(x), "Function('f')(Symbol('x'))")
# test unapplied Function
sT(Function('f'), "Function('f')")
def test_Geometry():
sT(Point(0,0), "Point(Zero, Zero)")
sT(Ellipse(Point(0, 0), 5, 1), "Ellipse(Point(Zero, Zero), Integer(5), One)")
# TODO more tests
def test_Infinity():
sT(oo, "Infinity")
def test_Integer():
sT(Integer(4), "Integer(4)")
def test_list():
sT([x, Integer(4)], "[Symbol('x'), Integer(4)]")
def test_Matrix():
sT(Matrix([[x**+1, 1], [y, x+y]]), "Matrix([[Symbol('x'), One], [Symbol('y'), Add(Symbol('x'), Symbol('y'))]])")
def test_NaN():
sT(nan, "nan")
def test_NegativeInfinity():
sT(-oo, "NegativeInfinity")
def test_NegativeOne():
sT(-Integer(1), "NegativeOne")
def test_One():
sT(S.One, "One")
def test_Rational():
sT(Rational(1,3), "Rational(1, 3)")
sT(Rational(-1,3), "Rational(-1, 3)")
def test_Real():
sT(Real('1.23', prec=3), "Real('1.22998', prec=3)")
sT(Real('1.23456789', prec=9), "Real('1.23456788994', prec=9)")
sT(Real('1.234567890123456789', prec=19), "Real('1.234567890123456789013', prec=19)")
sT(Real('0.60038617995049726', 15), "Real('0.60038617995049726', prec=15)")
def test_Symbol():
sT(x, "Symbol('x')")
sT(y, "Symbol('y')")
def test_tuple():
sT((x,), "(Symbol('x'),)")
sT((x,y), "(Symbol('x'), Symbol('y'))")
def test_WildFunction():
sT(WildFunction('w'), "WildFunction('w')")
def test_Zero():
sT(S.Zero, "Zero")
| bsd-3-clause | 2,438,983,180,236,338,700 | 26.949495 | 116 | 0.607156 | false |
canesin/FEVal | feval/ShapeFunctions.py | 1 | 37904 | # -*- coding: iso-8859-1 -*-
#============================================================
#
# This file is part of FEval, a module for the
# evaluation of Finite Element results
#
# License: FEval is provided under the GNU General Public License (GPL)
#
# Authors: Martin Lüthi, [email protected]
#
# Homepage: http://feval.sourceforge.net
#
# History: long, long, most of it in 2000
# 2001.09.21 (ml): Code cleaned up for intial release
#
# Purpose: Provide Finite Element shape functions
#
#============================================================
import numpy as N
try:
import scipy.linalg as LA
except:
print 'could not import scipy.linalg!'
pass
## calculate coordinates and weights of Gauss points
## (cf. Numerical Recipes in C++, p.157)
##
## Results are the same as in Bathe 1982 for the first 7 digits
#============================================================================
# try to use Psyco (psyco.sourceforge.net)
# if configured, this will speed up things considerably
# try:
# import psyco
# from psyco.classes import *
# except ImportError:
# class _psyco:
# def jit(self): pass
# def bind(self, f): pass
# def proxy(self, f): return f
# psyco = _psyco()
class ShapeFunctionPrototype:
"""Defines the prototype of a interpolation function
cornernodes defines the nodes at the geometrical corner
We use MARC numbering, i.e. corner nodes first, anticlockwise
"""
dim, nnodes = 0, 0 # dimension and number of nodes
nsides = 0 # number of element sides
cornernodes = N.array([]) # list of corner nodes
sidenodes = N.array([]) # list of nodes on the side with index
    nextnodes = N.array([]) # list of nodes that are adjacent to the node with index
lcoordGauss = None
gaussShape = None
gaussShapeInv = None
def __init__(self):
self.f = N.zeros(self.nnodes, N.float_)
self.df = N.zeros( (self.dim, self.nnodes), N.float_)
def __call__(self, args):
return self.calcShape(args)
    # must be overridden by the shape function
def calcShape(self, lcoord):
return None
    # must be overridden by the shape function
def calcShapeDeriv(self, lcoord):
return None
def nextPattern(self, lcoord):
return [0]
def calcGauss(self):
"""Calculate the inverse of the shape functions at the Gauss points"""
a = []
for lc in self.lcoordGauss:
a.append(N.array(self.calcShape(lc)))
self.gaussShape = N.take(N.array(a), self.cornernodes, 1)
#self.gaussShapeInv = LA.inverse(self.gaussShape)
self.gaussShapeInv = LA.pinv(self.gaussShape)
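# Usage sketch (illustrative; every concrete subclass follows this protocol):
#   sf = ShapeFunction_Quad4()
#   weights = sf([xi, eta])              # nodal weights at a local coordinate
#   value = N.dot(weights, nodal_values) # interpolate a nodal field (hypothetical values)
#   sf.calcGauss()                       # prepares gaussShapeInv, which maps
#                                        # Gauss-point values back to corner nodes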
class ShapeFunction_Line2(ShapeFunctionPrototype):
"""Element function for linear element defined
0-------1
"""
name = 'Line2'
dim, nnodes = 2, 2
cornernodes = N.array([0,1])
nsides = 2
sidetype = 'Point1'
sidenodes = N.array(
[[0],
[1],
])
nextnodes = N.array(
[[1],
[0],
])
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1.],
[ 1.],
])*gaussDist
def calcShape(self, lcoord):
x = lcoord[0]
return 0.5*N.array([
1.0-x,
1.0+x ])
def calcShapeDeriv(self, lcoord):
x = lcoord[0]
return N.array([ -0.5, 0.5 ])
def nextPattern(self, lcoord):
x = lcoord * 1.01
if x > 1: return [1]
elif x < -1: return [0]
else: return None
class ShapeFunction_Tri3(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
2
/ \
/ \
/ \
0-------1
"""
name = 'Tri3'
dim, nnodes = 2, 3
cornernodes = N.array([0,1,2])
nsides = 3
sidetype = 'Line2'
sidenodes = N.array(
[[0,1],
[1,2],
[2,0],
])
nextnodes = N.array(
[[1,2],
[0,2],
[0,1],
])
triangles = N.array([[0,1,2]])
    #!!!! wrong, this is still from quads
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
# def calcShape(self, lcoord):
# x, y = lcoord
# return N.array([
# 1.-x-y,
# x,
# y ])
def calcShape(self, lcoord):
x, y = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
x = 0.5*(x+1)
y = 0.5*(y+1)
return N.array([
1.-x-y,
x,
y ])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
self.df[0,0] = -0.5
self.df[0,1] = 0.5
self.df[0,2] = 0.
self.df[1,0] = -0.5
self.df[1,1] = 0.
self.df[1,2] = 0.5
return self.df
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x+y > 1: return [1,2]
elif y < 0: return [0,1]
elif x < 0: return [2,0]
else: return None
class ShapeFunction_Tri6(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
2
/ \
5 4
/ \
0---3---1
"""
name = 'Tri6'
dim, nnodes = 2, 6
cornernodes = N.array([0,1,2])
nsides = 3
sidetype = 'Line3'
sidenodes = N.array(
[[0,3,1],
[1,4,2],
[2,5,0],
])
nextnodes = N.array(
[[1,2],
[0,2],
[0,1],
])
#triangles = N.array([[0,1,2]])
    #!!!! wrong, this is still from quads
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
xi1, xi2 = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
y = 0.5*(xi1+1.)
z = 0.5*(xi2+1.)
x = 1. - y - z
return N.array([
2.*x*(x-0.5),
2.*y*(y-0.5),
2.*z*(z-0.5),
4.*y*z,
4.*z*x,
4.*x*y,
])
def calcShapeDeriv(self, lcoord):
        # not implemented correctly yet; fail loudly rather than via the bare 'stop' NameError
        raise NotImplementedError('Tri6.calcShapeDeriv is not implemented correctly yet')
xi1, xi2 = lcoord
# 0.5*(x+1) [-1,1] -> x [0,1]
zeta1 = 0.5*(xi1+1.)
zeta2 = 0.5*(xi2+1.)
zeta0 = 1. - zeta1 - zeta2
self.df[0,0] = 4.*zeta0-1.
self.df[0,1] = 4.*zeta1-1.
self.df[0,2] = 4.*zeta2-1.
self.df[0,3] = 4.*zeta2-1.
self.df[0,4] = 4.*zeta2-1.
self.df[0,5] = 4.*zeta2-1.
self.df[1,0] = -0.5
self.df[1,1] = 0.
self.df[1,2] = 0.5
return self.df
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x+y > 1: return [1,2]
elif y < 0: return [0,1]
elif x < 0: return [2,0]
else: return None
# def nextPattern(self, lcoord):
# xi1, xi2 = lcoord / max(N.absolute(lcoord)) * 1.01
# # 0.5*(x+1) [-1,1] -> x [0,1]
# y = 0.5*(xi1+1.)
# z = 0.5*(xi2+1.)
# x = 1. - y - z
# #x,y = lcoord
# if x < 0: return [0,1]
# elif y < 0: return [1,2]
# elif z < 0: return [2,0]
# else: return None
class ShapeFunction_Quad4(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
3-------2
| |
| |
| |
0-------1
"""
name = 'Quad4'
dim, nnodes = 2, 4
cornernodes = N.array([0,1,2,3])
nsides = 4
sidetype = 'Line2'
sidenodes = N.array(
[[0,1],
[1,2],
[2,3],
[3,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[0,1,3],
[1,2,3]])
gaussDist = 0.577350269189626 # 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1.],
[ 1., -1.],
[-1., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
x, y = lcoord
xy = x*y
return 0.25*N.array([
1.0-x-y+xy,
1.0+x-y-xy,
1.0+x+y+xy,
1.0-x+y-xy ])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
return 0.25*N.array([
[ -1.0+y, 1.0-y, 1.0+y, -1.0-y],
[ -1.0+x, -1.0-x, 1.0+x, 1.0-x ]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
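# Quick check (sketch): the Quad4 weights form a partition of unity, e.g.
#   ShapeFunction_Quad4()([-1., -1.])  # -> [1., 0., 0., 0.]  (corner node 0)
#   ShapeFunction_Quad4()([ 0.,  0.])  # -> [0.25, 0.25, 0.25, 0.25], summing to 1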
class ShapeFunction_Quad8(ShapeFunctionPrototype):
"""Element function for quadratic element defined in MARC Manual B2.7-1
Taylor&Hughes (1981), p. 50
Element nodes numbering is the same as for MARC
3-----6-----2
|(5) (6) (7)|
| |
7(3) (4)5
| |
|(0) (1) (2)|
0-----4-----1
"""
name = 'Quad8'
dim, nnodes = 2, 8
cornernodes = [0,1,2,3]
nsides = 4
sidetype = 'Line3'
sidenodes = N.array(
[[0,4,1],
[1,5,2],
[2,6,3],
[3,7,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[7,0,4],
[4,1,5],
[5,2,6],
[6,3,7],
[7,4,5],
[5,6,7]])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy = xx*y, x*yy
return 0.25*N.array([
# the corner nodes
(-1.0+xy+xx+yy-xxy-xyy),
(-1.0-xy+xx+yy-xxy+xyy),
(-1.0+xy+xx+yy+xxy+xyy),
(-1.0-xy+xx+yy+xxy-xyy),
# the mid-side nodes
2.*(1.0-y-xx+xxy),
2*(1.0+x-yy-xyy),
2*(1.0+y-xx-xxy),
2*(1.0-x-yy+xyy)])
def calcShapeDeriv(self, lcoord):
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy, xy2 = xx*y, x*yy, xy*xy
return 0.25*N.array([
[
# the corner nodes
y+xx-xy2-yy,
y+xx-xy2+yy,
y+xx+xy2+yy,
y+xx+xy2-yy,
# the mid-side nodes
(-x+xy)*4.,
(1.0-yy)*2.,
(-x-xy)*4.,
(-1.0+yy)*2.,
],[
# the corner nodes
x+yy-xx-xy2,
x+yy-xx+xy2,
x+yy+xx+xy2,
x+yy+xx-xy2,
# the mid-side nodes
(-1.0+xx)*2.,
(-y-xy)*4.,
(1.0-xx)*2.,
(-y+xy)*4.]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
class ShapeFunction_Quad9(ShapeFunctionPrototype):
"""Element function for quadratic element defined in MARC Manual B2.7-1
Taylor&Hughes (1981), p. 50
Element nodes numbering is the same as for MARC
3-----6-----2
|(5) (6) (7)|
| |
7(3) 8 (4)5
| |
|(0) (1) (2)|
0-----4-----1
"""
name = 'Quad9'
dim, nnodes = 2, 9
cornernodes = [0,1,2,3]
nsides = 4
sidetype = 'Line3'
sidenodes = N.array(
[[0,4,1],
[1,5,2],
[2,6,3],
[3,7,0],
])
nextnodes = N.array(
[[1,3],
[0,2],
[1,3],
[0,2],
])
triangles = N.array([[7,0,4],
[4,1,5],
[5,2,6],
[6,3,7],
[7,4,5],
[5,6,7]])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.] ])*gaussDist
def calcShape(self, lcoord):
print "not implemented correctly"
stop
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy = xx*y, x*yy
return 0.25*N.array([
# the corner nodes
(-1.0+xy+xx+yy-xxy-xyy),
(-1.0-xy+xx+yy-xxy+xyy),
(-1.0+xy+xx+yy+xxy+xyy),
(-1.0-xy+xx+yy+xxy-xyy),
# the mid-side nodes
2.*(1.0-y-xx+xxy),
2*(1.0+x-yy-xyy),
2*(1.0+y-xx-xxy),
2*(1.0-x-yy+xyy)])
def calcShapeDeriv(self, lcoord):
print "not implemented correctly"
stop
x, y = lcoord
xx, yy, xy = x*x, y*y, x*y
xxy, xyy, xy2 = xx*y, x*yy, xy*xy
return 0.25*N.array([
[
# the corner nodes
y+xx-xy2-yy,
y+xx-xy2+yy,
y+xx+xy2+yy,
y+xx+xy2-yy,
# the mid-side nodes
(-x+xy)*4.,
(1.0-yy)*2.,
(-x-xy)*4.,
(-1.0+yy)*2.,
],[
# the corner nodes
x+yy-xx-xy2,
x+yy-xx+xy2,
x+yy+xx+xy2,
x+yy+xx-xy2,
# the mid-side nodes
(-1.0+xx)*2.,
(-y-xy)*4.,
(1.0-xx)*2.,
(-y+xy)*4.]])
def nextPattern(self, lcoord):
x,y = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2]
elif x < -1: return [3,0]
elif y > 1: return [2,3]
elif y < -1: return [0,1]
else: return None
class ShapeFunction_Hex8(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
7---------------6
/|(6) (7)/|
/ | / |
/ | / |
/ | / |
/ (4)| (5)/ |
4---------------5 |
| | | |
| 3---------|-----2
| / (2) | (3)/
| / | /
| / | /
| / | /
|/ (0) (1)|/
0---------------1
7-------6
/| /|
/ | / |
4-------5 |
| 3----|--2
| / | /
|/ |/
0-------1
"""
name = 'Hex8'
dim, nnodes = 3, 8
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad4'
sidenodes = N.array(
[[0,3,2,1],
[0,1,5,4],
[1,2,6,5],
[2,3,7,6],
[3,0,4,7],
[4,5,6,7],
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.577350269189626 # = 1./N.sqrt(3)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
# lcoordGauss = N.array([ [-1., -1., -1.],
# [ 1., -1., -1.],
# [ 1., 1., -1.],
# [-1., 1., -1.],
# [-1., -1., 1.],
# [ 1., -1., 1.],
# [ 1., 1., 1.],
# [-1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xyz = x*y*z
return 0.125*N.array([
1.0-x-y-z+xy+xz+yz-xyz, # -1,-1,-1,
1.0+x-y-z-xy-xz+yz+xyz, # 1,-1,-1,
1.0+x+y-z+xy-xz-yz-xyz, # 1, 1,-1,
1.0-x+y-z-xy+xz-yz+xyz, # -1, 1,-1,
1.0-x-y+z+xy-xz-yz+xyz, # -1,-1, 1,
1.0+x-y+z-xy+xz-yz-xyz, # 1,-1, 1,
1.0+x+y+z+xy+xz+yz+xyz, # 1, 1, 1,
1.0-x+y+z-xy-xz+yz-xyz]) # -1, 1, 1,
def calcShapeDeriv(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
self.df[0,0] = -1.0+y+z-yz
self.df[1,0] = -1.0+x+z-xz
self.df[2,0] = -1.0+x+y-xy
self.df[0,1] = 1.0-y-z+yz
self.df[1,1] = -1.0-x+z+xz
self.df[2,1] = -1.0-x+y+xy
self.df[0,2] = 1.0+y-z-yz
self.df[1,2] = 1.0+x-z-xz
self.df[2,2] = -1.0-x-y-xy
self.df[0,3] = -1.0-y+z+yz
self.df[1,3] = 1.0-x-z+xz
self.df[2,3] = -1.0+x-y+xy
self.df[0,4] = -1.0+y-z+yz
self.df[1,4] = -1.0+x-z+xz
self.df[2,4] = 1.0-x-y+xy
self.df[0,5] = 1.0-y+z-yz
self.df[1,5] = -1.0-x-z-xz
self.df[2,5] = 1.0+x-y-xy
self.df[0,6] = 1.0+y+z+yz
self.df[1,6] = 1.0+x+z+xz
self.df[2,6] = 1.0+x+y+xy
self.df[0,7] = -1.0-y-z-yz
self.df[1,7] = 1.0-x+z-xz
self.df[2,7] = 1.0-x+y-xy
self.df = self.df/8.0
return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return self.sidenodes[2] #[1,2,6,5]
elif x < -1: return self.sidenodes[4] #[0,4,7,3]
elif y > 1: return self.sidenodes[3] #[2,3,7,6]
elif y < -1: return self.sidenodes[1] #[0,1,5,4]
elif z > 1: return self.sidenodes[5] #[4,5,6,7]
elif z < -1: return self.sidenodes[0] #[0,3,2,1]
else: return None
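# Quick check (sketch): at the element centre all eight Hex8 nodes carry the
# same weight:
#   ShapeFunction_Hex8()([0., 0., 0.])  # -> eight entries of 0.125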
class ShapeFunction_Hex20(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
Here we adopt the numbering from Libmesh, i.e. the second level
    of second order nodes comes before the 3rd level
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
# 7-------14------6
# /|(6) (7)/|
# / | / |
# 15 | 13 |
# / 19 / 18
# / (4)| (5)/ |
# 4-------12------5 |
# | | | |
# | 3------10-|-----2
# | / (2) | (3)/
# 16 / 17 /
# | 11 | 9
# | / | /
# |/ (0) (1)|/
# 0-------8-------1
7-------18------6
/|(6) (7)/|
/ | / |
19 | 17 |
/ 15 / 14
/ (4)| (5)/ |
4-------16------5 |
| | | |
| 3------10-|-----2
| / (2) | (3)/
12 / 13 /
| 11 | 9
| / | /
|/ (0) (1)|/
0-------8-------1
    node correspondence between the two numberings shown above:
    16 - 12
    17 - 13
    18 - 14
    19 - 15
    12 - 16
    13 - 17
    14 - 18
    15 - 19
"""
name = 'Hex20'
dim, nnodes = 3, 20
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad8'
sidenodes = N.array(
[[0,3,2,1,11,10,9,8], # side 0
[0,1,5,4,8, 13, 16, 12], # side 1
[1,2,6,5,9, 14, 17, 13], # side 2
[2,3,7,6,10, 15, 18, 14], # side 3
[3,0,4,7,11, 12, 19, 15], # side 4
[4,5,6,7,16, 17, 18, 19] # side 5
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xx, yy, zz = x*x, y*y, z*z
xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
yyz, xzz, yzz = yy*z, x*zz, y*zz
xxyz, xyyz, xyzz = xxy*z, xyy*z, xyz*z
self.f[0] = (x+y+z-xyz+xx+yy+zz-xxy-xyy-xxz-xzz-yyz-yzz+ \
xxyz+xyyz+xyzz-2.0)/8.0
self.f[1] = (-x+y+z+xyz+xx+yy+zz-xxy+xyy-xxz+xzz-yyz-yzz+ \
xxyz-xyyz-xyzz-2.0)/8.0
self.f[2] = (-x-y+z-xyz+xx+yy+zz+xxy+xyy-xxz+xzz-yyz+yzz- \
xxyz-xyyz+xyzz-2.0)/8.0
self.f[3] = (x-y+z+xyz+xx+yy+zz+xxy-xyy-xxz-xzz-yyz+yzz- \
xxyz+xyyz-xyzz-2.0)/8.0
self.f[4] = (x+y-z+xyz+xx+yy+zz-xxy-xyy+xxz-xzz+yyz-yzz- \
xxyz-xyyz+xyzz-2.0)/8.0
self.f[5] = (-x+y-z-xyz+xx+yy+zz-xxy+xyy+xxz+xzz+yyz-yzz- \
xxyz+xyyz-xyzz-2.0)/8.0
self.f[6] = (-x-y-z+xyz+xx+yy+zz+xxy+xyy+xxz+xzz+yyz+yzz+ \
xxyz+xyyz+xyzz-2.0)/8.0
self.f[7] = (x-y-z-xyz+xx+yy+zz+xxy-xyy+xxz-xzz+yyz+yzz+ \
xxyz-xyyz-xyzz-2.0)/8.0
self.f[8] = (1.0-z-y+yz-xx+xxz+xxy-xxyz)/4.0
self.f[9] = (1.0-z+x-xz-yy+yyz-xyy+xyyz)/4.0
self.f[10] = (1.0-z+y-yz-xx+xxz-xxy+xxyz)/4.0
self.f[11] = (1.0-z-x+xz-yy+yyz+xyy-xyyz)/4.0
self.f[16] = (1.0+z-y-yz-xx-xxz+xxy+xxyz)/4.0
self.f[17] = (1.0+z+x+xz-yy-yyz-xyy-xyyz)/4.0
self.f[18] = (1.0+z+y+yz-xx-xxz-xxy-xxyz)/4.0
self.f[19] = (1.0+z-x-xz-yy-yyz+xyy+xyyz)/4.0
self.f[12] = (1.0-y-x+xy-zz+yzz+xzz-xyzz)/4.0
self.f[13] = (1.0-y+x-xy-zz+yzz-xzz+xyzz)/4.0
self.f[14] = (1.0+y+x+xy-zz-yzz-xzz-xyzz)/4.0
self.f[15] = (1.0+y-x-xy-zz-yzz+xzz+xyzz)/4.0
return self.f
def calcShapeDeriv(self, lcoord):
x, y, z = lcoord
xy, xz, yz = x*y, x*z, y*z
xx, yy, zz = x*x, y*y, z*z
xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
yyz, xzz, yzz = yy*z, x*zz, y*zz
self.df[0, 0] = 1.0-yz+2.0*x-2.0*xy-yy-2.0*xz-zz+2.0*xyz+yyz+yzz
self.df[1, 0] = 1.0-xz+2.0*y-xx-2.0*xy-2.0*yz-zz+xxz+2.0*xyz+xzz
self.df[2, 0] = 1.0-xy+2.0*z-xx-2.0*xz-yy-2.0*yz+xxy+xyy+2.0*xyz
self.df[0, 1] = -1.0+yz+2.0*x-2.0*xy+yy-2.0*xz+zz+2.0*xyz-yyz-yzz
self.df[1, 1] = 1.0+xz+2.0*y-xx+2.0*xy-2.0*yz-zz+xxz-2.0*xyz-xzz
self.df[2, 1] = 1.0+xy+2.0*z-xx+2.0*xz-yy-2.0*yz+xxy-xyy-2.0*xyz
self.df[0, 2] = -1.0-yz+2.0*x+2.0*xy+yy-2.0*xz+zz-2.0*xyz-yyz+yzz
self.df[1, 2] = -1.0-xz+2.0*y+xx+2.0*xy-2.0*yz+zz-xxz-2.0*xyz+xzz
self.df[2, 2] = 1.0-xy+2.0*z-xx+2.0*xz-yy+2.0*yz-xxy-xyy+2.0*xyz
self.df[0, 3] = 1.0+yz+2.0*x+2.0*xy-yy-2.0*xz-zz-2.0*xyz+yyz-yzz
self.df[1, 3] = -1.0+xz+2.0*y+xx-2.0*xy-2.0*yz+zz-xxz+2.0*xyz-xzz
self.df[2, 3] = 1.0+xy+2.0*z-xx-2.0*xz-yy+2.0*yz-xxy+xyy-2.0*xyz
self.df[0, 4] = 1.0+yz+2.0*x-2.0*xy-yy+2.0*xz-zz-2.0*xyz-yyz+yzz
self.df[1, 4] = 1.0+xz+2.0*y-xx-2.0*xy+2.0*yz-zz-xxz-2.0*xyz+xzz
self.df[2, 4] = -1.0+xy+2.0*z+xx-2.0*xz+yy-2.0*yz-xxy-xyy+2.0*xyz
self.df[0, 5] = -1.0-yz+2.0*x-2.0*xy+yy+2.0*xz+zz-2.0*xyz+yyz-yzz
self.df[1, 5] = 1.0-xz+2.0*y-xx+2.0*xy+2.0*yz-zz-xxz+2.0*xyz-xzz
self.df[2, 5] = -1.0-xy+2.0*z+xx+2.0*xz+yy-2.0*yz-xxy+xyy-2.0*xyz
self.df[0, 6] = -1.0+yz+2.0*x+2.0*xy+yy+2.0*xz+zz+2.0*xyz+yyz+yzz
self.df[1, 6] = -1.0+xz+2.0*y+xx+2.0*xy+2.0*yz+zz+xxz+2.0*xyz+xzz
self.df[2, 6] = -1.0+xy+2.0*z+xx+2.0*xz+yy+2.0*yz+xxy+xyy+2.0*xyz
self.df[0, 7] = 1.0-yz+2.0*x+2.0*xy-yy+2.0*xz-zz+2.0*xyz-yyz-yzz
self.df[1, 7] = -1.0-xz+2.0*y+xx-2.0*xy+2.0*yz+zz+xxz-2.0*xyz-xzz
self.df[2, 7] = -1.0-xy+2.0*z+xx-2.0*xz+yy+2.0*yz+xxy-xyy-2.0*xyz
self.df[:, 0:8] = self.df[:, 0:8]/2.0
self.df[0, 8] = -2.0*x+2.0*xz+2.0*xy-2.0*xyz
self.df[1, 8] = -1.0+z+xx-xxz
self.df[2, 8] = -1.0+y+xx-xxy
self.df[0, 9] = 1.0-z-yy+yyz
self.df[1, 9] = -2.0*y+2.0*yz-2.0*xy+2.0*xyz
self.df[2, 9] = -1.0-x+yy+xyy
self.df[0, 10] = -2.0*x+2.0*xz-2.0*xy+2.0*xyz
self.df[1, 10] = 1.0-z-xx+xxz
self.df[2, 10] = -1.0-y+xx+xxy
self.df[0, 11] = -1.0+z+yy-yyz
self.df[1, 11] = -2.0*y+2.0*yz+2.0*xy-2.0*xyz
self.df[2, 11] = -1.0+x+yy-xyy
self.df[0, 16] = -2*x-2*xz+2*xy+2*xyz
self.df[1, 16] = -1.0-z+xx+xxz
self.df[2, 16] = 1.0-y-xx+xxy
self.df[0, 17] = 1.0+z-yy-yyz
self.df[1, 17] = -2*y-2*yz-2*xy-2*xyz
self.df[2, 17] = 1.0+x-yy-xyy
self.df[0, 18] = -2*x-2*xz-2*xy-2*xyz
self.df[1, 18] = 1.0+z-xx-xxz
self.df[2, 18] = 1.0+y-xx-xxy
self.df[0, 19] = -1.0-z+yy+yyz
self.df[1, 19] = -2*y-2*yz+2*xy+2*xyz
self.df[2, 19] = 1.0-x-yy+xyy
self.df[0, 12] = -1.0+y+zz-yzz
self.df[1, 12] = -1.0+x+zz-xzz
self.df[2, 12] = -2*z+2*yz+2*xz-2*xyz
self.df[0, 13] = 1.0-y-zz+yzz
self.df[1, 13] = -1.0-x+zz+xzz
self.df[2, 13] = -2*z+2*yz-2*xz+2*xyz
self.df[0, 14] = 1.0+y-zz-yzz
self.df[1, 14] = 1.0+x-zz-xzz
self.df[2, 14] = -2*z-2*yz-2*xz-2*xyz
self.df[0, 15] = -1.0-y+zz+yzz
self.df[1, 15] = 1.0-x-zz+xzz
self.df[2, 15] = -2*z-2*yz+2*xz+2*xyz
self.df = self.df/4.0
return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2,6,5]
elif x < -1: return [0,3,7,4]
elif y > 1: return [2,3,7,6]
elif y < -1: return [0,1,5,4]
elif z > 1: return [4,5,6,7]
elif z < -1: return [0,1,2,3]
else: return None
class ShapeFunction_Hex27(ShapeFunctionPrototype):
"""Element function for linear element defined in MARC Manual B2.4-1,
Taylor&Hughes (1981), p. 49
Here we adopt the numbering from Libmesh, i.e. the second level
of second order nodes comes before the 3rd level
The integration points (in parentheses) are located at unexpected
locations (for MARC)!
7-------18------6
/|(6) (7)/|
/ | / |
19 | [25] 17 |
/ 15 [23] / 14 center node: 26
/ (4)| (5)/ |
4-------16------5 |
| [24]| | [22]|
| 3------10-|-----2
| / (2) | (3)/
12 / [21] 13 /
| 11 [20] | 9
| / | /
|/ (0) (1)|/
0-------8-------1
"""
name = 'Hex27'
dim, nnodes = 3, 27
cornernodes = [0,1,2,3,4,5,6,7]
nsides = 6
sidetype = 'Quad9'
sidenodes = N.array([
[0, 3, 2, 1, 11, 10, 9, 8, 20], # Side 0 (exodus: 5) 20 -> 22
[0, 1, 5, 4, 8, 13, 16, 12, 21], # Side 1 (exodus: 1) 21 -> 26
[1, 2, 6, 5, 9, 14, 17, 13, 22], # Side 2 (exodus: 2) 22 -> 25
[2, 3, 7, 6, 10, 15, 18, 14, 23], # Side 3 (exodus: 3) 23 -> 27
[3, 0, 4, 7, 11, 12, 19, 15, 24], # Side 4 (exodus: 4) 24 -> 24
[4, 5, 6, 7, 16, 17, 18, 19, 25] # Side 5 (exodus: 6) 25 -> 23
])
nextnodes = N.array(
[[1,3,4],
[0,2,5],
[1,3,6],
[0,2,7],
[0,5,7],
[1,4,6],
[2,5,7],
[3,4,6],
])
gaussDist = 0.774596669241483 # = N.sqrt(0.6)
lcoordGauss = N.array([ [-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])*gaussDist
def calcShape(self, lcoord):
print 'not implemented'
return None
# x, y, z = lcoord
# xy, xz, yz = x*y, x*z, y*z
# xx, yy, zz = x*x, y*y, z*z
# xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
# yyz, xzz, yzz = yy*z, x*zz, y*zz
# xxyz, xyyz, xyzz = xxy*z, xyy*z, xyz*z
# self.f[0] = (x+y+z-xyz+xx+yy+zz-xxy-xyy-xxz-xzz-yyz-yzz+ \
# xxyz+xyyz+xyzz-2.0)/8.0
# self.f[1] = (-x+y+z+xyz+xx+yy+zz-xxy+xyy-xxz+xzz-yyz-yzz+ \
# xxyz-xyyz-xyzz-2.0)/8.0
# self.f[2] = (-x-y+z-xyz+xx+yy+zz+xxy+xyy-xxz+xzz-yyz+yzz- \
# xxyz-xyyz+xyzz-2.0)/8.0
# self.f[3] = (x-y+z+xyz+xx+yy+zz+xxy-xyy-xxz-xzz-yyz+yzz- \
# xxyz+xyyz-xyzz-2.0)/8.0
# self.f[4] = (x+y-z+xyz+xx+yy+zz-xxy-xyy+xxz-xzz+yyz-yzz- \
# xxyz-xyyz+xyzz-2.0)/8.0
# self.f[5] = (-x+y-z-xyz+xx+yy+zz-xxy+xyy+xxz+xzz+yyz-yzz- \
# xxyz+xyyz-xyzz-2.0)/8.0
# self.f[6] = (-x-y-z+xyz+xx+yy+zz+xxy+xyy+xxz+xzz+yyz+yzz+ \
# xxyz+xyyz+xyzz-2.0)/8.0
# self.f[7] = (x-y-z-xyz+xx+yy+zz+xxy-xyy+xxz-xzz+yyz+yzz+ \
# xxyz-xyyz-xyzz-2.0)/8.0
# self.f[8] = (1.0-z-y+yz-xx+xxz+xxy-xxyz)/4.0
# self.f[9] = (1.0-z+x-xz-yy+yyz-xyy+xyyz)/4.0
# self.f[10] = (1.0-z+y-yz-xx+xxz-xxy+xxyz)/4.0
# self.f[11] = (1.0-z-x+xz-yy+yyz+xyy-xyyz)/4.0
# self.f[12] = (1.0+z-y-yz-xx-xxz+xxy+xxyz)/4.0
# self.f[13] = (1.0+z+x+xz-yy-yyz-xyy-xyyz)/4.0
# self.f[14] = (1.0+z+y+yz-xx-xxz-xxy-xxyz)/4.0
# self.f[15] = (1.0+z-x-xz-yy-yyz+xyy+xyyz)/4.0
# self.f[16] = (1.0-y-x+xy-zz+yzz+xzz-xyzz)/4.0
# self.f[17] = (1.0-y+x-xy-zz+yzz-xzz+xyzz)/4.0
# self.f[18] = (1.0+y+x+xy-zz-yzz-xzz-xyzz)/4.0
# self.f[19] = (1.0+y-x-xy-zz-yzz+xzz+xyzz)/4.0
# return self.f
def calcShapeDeriv(self, lcoord):
print 'not implemented'
return None
# x, y, z = lcoord
# xy, xz, yz = x*y, x*z, y*z
# xx, yy, zz = x*x, y*y, z*z
# xyz, xxy, xxz, xyy = xy*z, xx*y, xx*z, x*yy
# yyz, xzz, yzz = yy*z, x*zz, y*zz
# self.df[0, 0] = 1.0-yz+2.0*x-2.0*xy-yy-2.0*xz-zz+2.0*xyz+yyz+yzz
# self.df[1, 0] = 1.0-xz+2.0*y-xx-2.0*xy-2.0*yz-zz+xxz+2.0*xyz+xzz
# self.df[2, 0] = 1.0-xy+2.0*z-xx-2.0*xz-yy-2.0*yz+xxy+xyy+2.0*xyz
# self.df[0, 1] = -1.0+yz+2.0*x-2.0*xy+yy-2.0*xz+zz+2.0*xyz-yyz-yzz
# self.df[1, 1] = 1.0+xz+2.0*y-xx+2.0*xy-2.0*yz-zz+xxz-2.0*xyz-xzz
# self.df[2, 1] = 1.0+xy+2.0*z-xx+2.0*xz-yy-2.0*yz+xxy-xyy-2.0*xyz
# self.df[0, 2] = -1.0-yz+2.0*x+2.0*xy+yy-2.0*xz+zz-2.0*xyz-yyz+yzz
# self.df[1, 2] = -1.0-xz+2.0*y+xx+2.0*xy-2.0*yz+zz-xxz-2.0*xyz+xzz
# self.df[2, 2] = 1.0-xy+2.0*z-xx+2.0*xz-yy+2.0*yz-xxy-xyy+2.0*xyz
# self.df[0, 3] = 1.0+yz+2.0*x+2.0*xy-yy-2.0*xz-zz-2.0*xyz+yyz-yzz
# self.df[1, 3] = -1.0+xz+2.0*y+xx-2.0*xy-2.0*yz+zz-xxz+2.0*xyz-xzz
# self.df[2, 3] = 1.0+xy+2.0*z-xx-2.0*xz-yy+2.0*yz-xxy+xyy-2.0*xyz
# self.df[0, 4] = 1.0+yz+2.0*x-2.0*xy-yy+2.0*xz-zz-2.0*xyz-yyz+yzz
# self.df[1, 4] = 1.0+xz+2.0*y-xx-2.0*xy+2.0*yz-zz-xxz-2.0*xyz+xzz
# self.df[2, 4] = -1.0+xy+2.0*z+xx-2.0*xz+yy-2.0*yz-xxy-xyy+2.0*xyz
# self.df[0, 5] = -1.0-yz+2.0*x-2.0*xy+yy+2.0*xz+zz-2.0*xyz+yyz-yzz
# self.df[1, 5] = 1.0-xz+2.0*y-xx+2.0*xy+2.0*yz-zz-xxz+2.0*xyz-xzz
# self.df[2, 5] = -1.0-xy+2.0*z+xx+2.0*xz+yy-2.0*yz-xxy+xyy-2.0*xyz
# self.df[0, 6] = -1.0+yz+2.0*x+2.0*xy+yy+2.0*xz+zz+2.0*xyz+yyz+yzz
# self.df[1, 6] = -1.0+xz+2.0*y+xx+2.0*xy+2.0*yz+zz+xxz+2.0*xyz+xzz
# self.df[2, 6] = -1.0+xy+2.0*z+xx+2.0*xz+yy+2.0*yz+xxy+xyy+2.0*xyz
# self.df[0, 7] = 1.0-yz+2.0*x+2.0*xy-yy+2.0*xz-zz+2.0*xyz-yyz-yzz
# self.df[1, 7] = -1.0-xz+2.0*y+xx-2.0*xy+2.0*yz+zz+xxz-2.0*xyz-xzz
# self.df[2, 7] = -1.0-xy+2.0*z+xx-2.0*xz+yy+2.0*yz+xxy-xyy-2.0*xyz
# self.df[:, 0:8] = self.df[:, 0:8]/2.0
# self.df[0, 8] = -2.0*x+2.0*xz+2.0*xy-2.0*xyz
# self.df[1, 8] = -1.0+z+xx-xxz
# self.df[2, 8] = -1.0+y+xx-xxy
# self.df[0, 9] = 1.0-z-yy+yyz
# self.df[1, 9] = -2.0*y+2.0*yz-2.0*xy+2.0*xyz
# self.df[2, 9] = -1.0-x+yy+xyy
# self.df[0, 10] = -2.0*x+2.0*xz-2.0*xy+2.0*xyz
# self.df[1, 10] = 1.0-z-xx+xxz
# self.df[2, 10] = -1.0-y+xx+xxy
# self.df[0, 11] = -1.0+z+yy-yyz
# self.df[1, 11] = -2.0*y+2.0*yz+2.0*xy-2.0*xyz
# self.df[2, 11] = -1.0+x+yy-xyy
# self.df[0, 12] = -2*x-2*xz+2*xy+2*xyz
# self.df[1, 12] = -1.0-z+xx+xxz
# self.df[2, 12] = 1.0-y-xx+xxy
# self.df[0, 13] = 1.0+z-yy-yyz
# self.df[1, 13] = -2*y-2*yz-2*xy-2*xyz
# self.df[2, 13] = 1.0+x-yy-xyy
# self.df[0, 14] = -2*x-2*xz-2*xy-2*xyz
# self.df[1, 14] = 1.0+z-xx-xxz
# self.df[2, 14] = 1.0+y-xx-xxy
# self.df[0, 15] = -1.0-z+yy+yyz
# self.df[1, 15] = -2*y-2*yz+2*xy+2*xyz
# self.df[2, 15] = 1.0-x-yy+xyy
# self.df[0, 16] = -1.0+y+zz-yzz
# self.df[1, 16] = -1.0+x+zz-xzz
# self.df[2, 16] = -2*z+2*yz+2*xz-2*xyz
# self.df[0, 17] = 1.0-y-zz+yzz
# self.df[1, 17] = -1.0-x+zz+xzz
# self.df[2, 17] = -2*z+2*yz-2*xz+2*xyz
# self.df[0, 18] = 1.0+y-zz-yzz
# self.df[1, 18] = 1.0+x-zz-xzz
# self.df[2, 18] = -2*z-2*yz-2*xz-2*xyz
# self.df[0, 19] = -1.0-y+zz+yzz
# self.df[1, 19] = 1.0-x-zz+xzz
# self.df[2, 19] = -2*z-2*yz+2*xz+2*xyz
# self.df = self.df/4.0
# return self.df
def nextPattern(self, lcoord):
x,y,z = lcoord / max(N.absolute(lcoord)) * 1.01
if x > 1: return [1,2,6,5]
elif x < -1: return [0,3,7,4]
elif y > 1: return [2,3,7,6]
elif y < -1: return [0,1,5,4]
elif z > 1: return [4,5,6,7]
elif z < -1: return [0,1,2,3]
else: return None
# all shape functions are registered here
shapeFunctions = {
'Line2': ShapeFunction_Line2,
'Tri3': ShapeFunction_Tri3,
'Tri6': ShapeFunction_Tri6,
'Quad4': ShapeFunction_Quad4,
'Quad8': ShapeFunction_Quad8,
'Quad9': ShapeFunction_Quad9,
'Hex8' : ShapeFunction_Hex8,
'Hex20': ShapeFunction_Hex20,
'Hex27': ShapeFunction_Hex27
}
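# Example (sketch): shape functions are looked up by element name and
# instantiated on demand, e.g.
#   sf = shapeFunctions['Hex20']()
#   sf.calcGauss()
#   weights = sf([0., 0., 0.])   # shape values at the element centre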
if __name__ == '__main__':
sh6 = ShapeFunction_Tri6()
sh3 = ShapeFunction_Tri3()
def shape(zeta):
zeta1, zeta2 = zeta
zeta0 = 1. - zeta1 - zeta2
return [2.*zeta0*(zeta0-0.5),
2.*zeta1*(zeta1-0.5),
2.*zeta2*(zeta2-0.5),
4.*zeta1*zeta2,
4.*zeta2*zeta0,
4.*zeta0*zeta1,
]
print shape([0.,0.])
print sh6([-1.,-1.])
print sh3([-1.,-1.])
print '----------------------'
print shape([1.,0.])
print sh6([1.,-1.])
print sh3([1.,-1.])
print '----------------------'
print shape([0.,1.])
print sh6([-1.,1.])
print sh3([-1.,1.])
print '----------------------'
print shape([0.5,0.5])
print sh6([0.,0.])
print sh3([0.,0.])
print '----------------------'
print shape([0.,0.5])
print sh6([-1.,0.])
print sh3([-1.,0.])
print '----------------------'
print shape([0.5,0.])
print sh6([0.,-1.])
print sh3([0.,-1.])
print '----------------------'
print shape([0.3,0.4])
print sh6([-0.4,-0.2])
print sh3([-0.4,-0.2])
# for n, sf in shapeFunctions.items():
# print '===== %s =====' % n
# s = sf()
# s.calcGauss()
# print s.gaussShapeInv
| gpl-2.0 | -1,197,586,550,653,715,200 | 31.675862 | 93 | 0.400828 | false |
gibil5/openhealth | models/management/mgt_patient_line.py | 1 | 2816 | # -*- coding: utf-8 -*-
"""
Management Patient Line
Should contain class methods
Created: 20 Jun 2019
	Last update: 27 Oct 2020
"""
from __future__ import print_function
from __future__ import absolute_import
from openerp import models, fields, api
from .management_db import ManagementDb
#from openerp.addons.openhealth.models.patient import pat_vars
from ..patient import pat_vars
class MgtPatientLine(models.Model):
"""
Patient line
"""
_name = 'openhealth.management.patient.line'
_order = 'amount_total desc'
# ----------------------------------------------------- Const ------------------
_MODEL = "openhealth.management.patient.line"
# ----------------------------------------------------- Class methods ----------
# Create
@classmethod
#def create_oh(cls, patient_id, management_id, env):
def create_oh(cls, patient, management_id, env):
#print('Class method - create')
#print(cls)
#print(patient_id, management_id)
# create
patient_line = env.create({
'patient': patient.id,
'management_id': management_id,
})
#cls.sex = patient.sex
#cls.age = patient.age
return patient_line
# Count
@classmethod
def count_oh(cls, patient_id, management_id, env):
#print('Class method - count')
#print(cls)
#print(patient_id, management_id)
# count
count = env.search_count([
('patient', '=', patient_id),
('management_id', '=', management_id),
],
#order='x_serial_nr asc',
#limit=1,
)
return count
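	# Usage sketch (illustrative; assumes an odoo-style environment lookup):
	#   line_env = self.env['openhealth.management.patient.line']
	#   line = MgtPatientLine.create_oh(patient, management.id, line_env)
	#   count = MgtPatientLine.count_oh(patient.id, management.id, line_env)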
# ----------------------------------------------------------- Handles ----------
# Management
management_id = fields.Many2one(
'openhealth.management',
ondelete='cascade',
)
# Patient
patient = fields.Many2one(
'oeh.medical.patient',
#string='Paciente',
)
# -------------------------------------------------------------- Vars ----------
amount_total = fields.Float()
count_total = fields.Integer()
age = fields.Char(
#string="Edad",
)
sex = fields.Selection(
selection=pat_vars.get_sex_type_list(),
#string="Sexo",
#required=False,
)
# ----------------------------------------------------------- Update -------------------------
# Update
@api.multi
def update(self):
"""
Update
"""
print()
print('** MgtPatientLine - Update')
# Update vars
self.sex = self.patient.sex
self.age = self.patient.age
# Calc Amount total - All sales ever
self.amount_total = 0
self.count_total = 0
# Get Orders
#orders, count = mgt_db.get_orders_filter_by_patient_fast(self, self.patient.id)
orders, count = ManagementDb.get_orders_filter_by_patient(self, self.patient.id)
for order in orders:
self.amount_total = self.amount_total + order.x_amount_flow
for line in order.order_line:
self.count_total = self.count_total + line.product_uom_qty
| agpl-3.0 | 2,078,314,972,504,652,000 | 22.663866 | 94 | 0.57848 | false |
redhat-imaging/imagefactory | imgfac/rest/bottle.py | 1 | 150206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2016, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.12.16'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just to handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
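    # Usage sketch (illustrative, not part of the original source): a filter
    # returns (regexp, to_python, to_url); the last two may be None.
    #   router = Router()
    #   router.add_filter('lang', lambda conf: (r'[a-z]{2}', None, None))
    #   router.add('/<code:lang>/index', 'GET', 'index_page')
    #   router.match({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/en/index'})
    #   # -> ('index_page', {'code': 'en'})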
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config, make_namespaces=True)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.") #0.12
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.') #0.12
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
        for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
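# Illustrative sketch (not part of the library): Route objects are normally
# created by Bottle.route() rather than by hand. Given a hypothetical
# application ``app`` with at least one route installed, a route can be
# inspected like this::
#
#     route = app.routes[0]
#     route.rule                    # e.g. '/hello/<name>'
#     route.method                  # e.g. 'GET'
#     route.get_callback_args()     # argument names of the original callback
#     list(route.all_plugins())     # plugins that will be applied to it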
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
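    # Illustrative sketch (not part of the library): attaching hooks to an
    # application. ``app``, the callbacks and the database file are
    # hypothetical names used only for illustration::
    #
    #     import sqlite3
    #
    #     app = Bottle()
    #
    #     @app.hook('before_request')
    #     def open_db():
    #         request.db = sqlite3.connect('app.db')   # stored in the environ
    #
    #     @app.hook('after_request')
    #     def close_db():
    #         if hasattr(request, 'db'):
    #             request.db.close()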
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
depr('Parameter order of Bottle.mount() changed.', True) # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
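    # Illustrative sketch (not part of the library): mounting a sub-application
    # under a URL prefix. ``api`` and ``root`` are hypothetical applications::
    #
    #     api = Bottle()
    #
    #     @api.route('/status')
    #     def status():
    #         return {'ok': True}
    #
    #     root = Bottle()
    #     root.mount('/api/', api)    # '/api/status' is now served by ``api``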
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
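    # Illustrative sketch (not part of the library): a plugin may be as simple
    # as a decorator. The names below are hypothetical::
    #
    #     def uppercase_plugin(callback):
    #         def wrapper(*args, **kwargs):
    #             body = callback(*args, **kwargs)
    #             return body.upper() if isinstance(body, str) else body
    #         return wrapper
    #
    #     app.install(uppercase_plugin)       # applied to every route
    #     app.uninstall(uppercase_plugin)     # remove this specific plugin
    #     app.uninstall(True)                 # or remove all plugins at once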
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
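    # Illustrative sketch (not part of the library): routes and error pages on
    # a hypothetical application ``app``::
    #
    #     @app.route('/hello/<name>')
    #     def hello(name):
    #         return 'Hello %s' % name
    #
    #     @app.post('/items')
    #     def create_item():
    #         return {'created': request.forms.get('title')}
    #
    #     @app.error(404)
    #     def not_found(error):
    #         return 'Nothing here, sorry (%s).' % error.status_line
    #
    #     app.run(host='localhost', port=8080)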
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:'Bottle' is a WSGI application. '''
return self.wsgi(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters",
            but they should not be confused with "URL wildcards", which are
            provided by the :class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
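    # Illustrative sketch (not part of the library): typical ways to read
    # request data inside a route callback. Handler names are hypothetical::
    #
    #     @app.route('/search')
    #     def search():
    #         term = request.query.q or 'default'     # ?q=... GET parameter
    #         return {'term': term}
    #
    #     @app.post('/login')
    #     def login():
    #         user = request.forms.get('user')        # form field
    #         payload = request.json                  # or a JSON request body
    #         upload = request.files.get('avatar')    # file upload, if any
    #         return 'ok'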
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
def _iter_chunked(self, read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
        ''' Read the body up to content-length or MEMFILE_MAX into a string.
            Raise HTTPError(413) on requests that are too large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
            raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
        ''' True if chunked transfer encoding was used for the request body. '''
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible for
            setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
        ''' True if the request was triggered by an XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at the client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
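    # Illustrative sketch (not part of the library): setting and reading a
    # signed cookie. ``SECRET`` and the routes are hypothetical::
    #
    #     SECRET = 'change-me'
    #
    #     @app.route('/remember/<name>')
    #     def remember(name):
    #         response.set_cookie('account', name, secret=SECRET, max_age=3600)
    #         return 'Stored.'
    #
    #     @app.route('/welcome')
    #     def welcome():
    #         name = request.get_cookie('account', secret=SECRET)
    #         return 'Welcome back, %s!' % name if name else 'Hello stranger.'
    #
    #     @app.route('/forget')
    #     def forget():
    #         response.delete_cookie('account')
    #         return 'Forgotten.'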
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def local_property(name=None):
if name: depr('local_property() is deprecated and will be removed.') #0.12
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property()
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property()
_status_code = local_property()
_cookies = local_property()
_headers = local_property()
body = local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure.
json_response = dumps(rv)
                # Set content type only if serialization was successful.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
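# Illustrative sketch (not part of the library): a minimal third-party plugin
# following the same v2 plugin API as JSONPlugin. All names are hypothetical::
#
#     import time
#
#     class StopwatchPlugin(object):
#         name = 'stopwatch'
#         api = 2
#
#         def apply(self, callback, route):
#             def wrapper(*args, **kwargs):
#                 start = time.time()
#                 body = callback(*args, **kwargs)
#                 response.headers['X-Exec-Time'] = '%.4f' % (time.time() - start)
#                 return body
#             return wrapper
#
#     app.install(StopwatchPlugin())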
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
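# Illustrative sketch (not part of the library): MultiDict keeps every value
# per key but behaves like a normal dict for plain item access::
#
#     md = MultiDict(a=1)
#     md.append('a', 2)
#     md['a']                 # -> 2 (the most recent value)
#     md.getall('a')          # -> [1, 2]
#     md.get('a', index=0)    # -> 1
#     md.get('a', type=str)   # -> '2' (cast via the ``type`` callable)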
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
''' Return the value as a unicode string, or the default. '''
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
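# Illustrative sketch (not part of the library): FormsDict is what powers
# ``request.forms`` and ``request.query``. For ordinary field names, attribute
# access does not raise and yields an empty string for missing fields::
#
#     name = request.forms.name                           # '' if absent
#     name = request.forms.getunicode('name', default='anonymous')
#     decoded = request.forms.decode('utf8')              # fully re-decoded copy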
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
This storage is optimized for fast read access. Retrieving a key
or using non-altering dict methods (e.g. `dict.get()`) has no overhead
compared to a native dict.
'''
__slots__ = ('_meta', '_on_change')
class Namespace(DictMixin):
def __init__(self, config, namespace):
self._config = config
self._prefix = namespace
def __getitem__(self, key):
depr('Accessing namespaces as dicts is discouraged. '
'Only use flat item access: '
'cfg["names"]["pace"]["key"] -> cfg["name.space.key"]') #0.12
return self._config[self._prefix + '.' + key]
def __setitem__(self, key, value):
self._config[self._prefix + '.' + key] = value
def __delitem__(self, key):
del self._config[self._prefix + '.' + key]
def __iter__(self):
ns_prefix = self._prefix + '.'
for key in self._config:
ns, dot, name = key.rpartition('.')
if ns == self._prefix and name:
yield name
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._prefix + '.' + key in self._config
def __repr__(self): return '<Config.Namespace %s.*>' % self._prefix
def __str__(self): return '<Config.Namespace %s.*>' % self._prefix
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = ConfigDict.Namespace(self._config, self._prefix + '.' + key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in ('_config', '_prefix'):
self.__dict__[key] = value
return
depr('Attribute assignment is deprecated.') #0.12
if hasattr(DictMixin, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.__class__):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.__class__):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
def __init__(self, *a, **ka):
self._meta = {}
self._on_change = lambda name, value: None
if a or ka:
depr('Constructor does no longer accept parameters.') #0.12
self.update(*a, **ka)
def load_config(self, filename):
''' Load values from an *.ini style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
'''
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace='', make_namespaces=False):
''' Import values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
{'name.space.key': 'value'}
'''
stack = [(namespace, source)]
while stack:
prefix, source = stack.pop()
if not isinstance(source, dict):
                raise TypeError('Source is not a dict (%r)' % type(source))
for key, value in source.items():
if not isinstance(key, basestring):
raise TypeError('Key is not a string (%r)' % type(key))
full_key = prefix + '.' + key if prefix else key
if isinstance(value, dict):
stack.append((full_key, value))
if make_namespaces:
self[full_key] = self.Namespace(self, full_key)
else:
self[full_key] = value
return self
def update(self, *a, **ka):
''' If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` '''
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
def clear(self):
for key in self:
del self[key]
def meta_get(self, key, metafield, default=None):
''' Return the value of a meta field for a key. '''
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
''' Return an iterable of meta field names defined for a key. '''
return self._meta.get(key, {}).keys()
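    # Illustrative sketch (not part of the library): flat, dotted keys with
    # optional per-key meta data. The keys shown are hypothetical::
    #
    #     cfg = ConfigDict()
    #     cfg.load_dict({'db': {'host': 'localhost', 'port': '5432'}})
    #     cfg['db.host']                          # -> 'localhost'
    #     cfg.update('db', user='app')            # prefixed update -> 'db.user'
    #     cfg.meta_set('db.port', 'filter', int)  # cast current and future values
    #     cfg['db.port']                          # -> 5432 (now an int)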
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = self.Namespace(self, key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in self.__slots__:
return dict.__setattr__(self, key, value)
depr('Attribute assignment is deprecated.') #0.12
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.Namespace):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.Namespace):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
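# Usage sketch for ResourceManager (the './static/' directory and 'logo.png'
# file are assumptions made up for this example).
def _resource_manager_example():
    rm = ResourceManager()
    rm.add_path('./static/', base=__file__)  # search next to this module
    fname = rm.lookup('logo.png')            # absolute path or None
    if fname:
        with rm.open('logo.png', mode='rb') as fp:
            return fp.read()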
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
''' Wrapper for file uploads. '''
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
''' Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
'''
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
''' Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
'''
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
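# Route sketch showing FileUpload in practice (the 'data' form field and the
# '/tmp/uploads' target directory are assumptions for illustration).
def _file_upload_example():
    upload = request.files.get('data')
    if upload is None:
        abort(400, 'No file uploaded.')
    upload.save('/tmp/uploads')        # stores under the sanitized .filename
    return 'Stored as %s' % upload.filename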
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
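# Minimal sketch of both helpers (the '/dashboard' target is an assumption):
# abort() raises an HTTPError, redirect() raises an HTTPResponse.
def _abort_redirect_example(user):
    if user is None:
        abort(401, 'Login required.')
    redirect('/dashboard')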
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
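# Usage sketch for static_file (the root directory is an assumption). The
# second handler forces a 'Save as...' dialog via the download flag.
def _serve_static_example(filename):
    return static_file(filename, root='/var/www/static')

def _serve_download_example(filename):
    return static_file(filename, root='/var/www/static', download=True)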
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from an HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
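# Behaviour sketch: parse_range_header() yields half-open (start, end) pairs,
# clamped to the given document length (here 1000 bytes).
def _parse_range_example():
    assert list(parse_range_header('bytes=0-99', 1000)) == [(0, 100)]
    assert list(parse_range_header('bytes=-100', 1000)) == [(900, 1000)]
    assert list(parse_range_header('bytes=500-', 1000)) == [(500, 1000)]
    assert list(parse_range_header('bytes=900-2000', 1000)) == [(900, 1000)]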
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
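# Round-trip sketch for the signed-cookie helpers (the secret string is an
# assumption): decoding only succeeds with the same secret.
def _cookie_roundtrip_example():
    secret = 'example-secret'
    raw = cookie_encode({'user': 'alice'}, secret)
    assert cookie_is_encoded(raw)
    assert cookie_decode(raw, secret) == {'user': 'alice'}
    assert cookie_decode(raw, 'wrong-secret') is None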
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
''' Escape and quote a string to be used as an HTML attribute.'''
return '"%s"' % html_escape(string).replace('\n','&#10;')\
.replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
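# Behaviour sketch for path_shift(): moving one fragment from PATH_INFO to
# SCRIPT_NAME and back again.
def _path_shift_example():
    assert path_shift('/app', '/blog/5', 1) == ('/app/blog', '/5')
    assert path_shift('/app/blog', '/5', -1) == ('/app', '/blog/5')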
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
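# Usage sketch for auth_basic (the credentials check below is an assumption;
# real applications should verify against a user store).
def _check_credentials_example(user, password):
    return user == 'admin' and password == 'secret'

@auth_basic(_check_credentials_example, realm='example')
def _protected_page_example():
    return 'Hello, admin!'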
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.options.pop('fast', None):
depr('The "fast" option has been deprecated and removed by Gevent.')
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
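# Launch sketch (host and port values are assumptions): hand run() an explicit
# Bottle instance instead of relying on the implicit default application.
def _run_example():
    application = Bottle()
    run(application, server='wsgiref', host='127.0.0.1', port=8080, debug=True)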
class FileCheckerThread(threading.Thread):
''' Interrupt the main thread as soon as a changed module file is detected,
the lockfile gets deleted or the lockfile gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '') or ''
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.') #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.') #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
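# Adapter sketch: a minimal BaseTemplate subclass that uses str.format() as
# its engine. This is an illustration of the adapter API, not a shipped class.
class _FormatTemplateExample(BaseTemplate):
    def prepare(self, **options):
        if not self.source:
            with open(self.filename, 'rb') as f:
                self.source = f.read().decode(self.encoding)
    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        tplvars = self.defaults.copy()
        tplvars.update(kwargs)
        return self.source.format(**tplvars)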
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
if _name is None:
depr('Rebase function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
if _name is None:
depr('Rebase function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '([urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Open and close grouping tokens
_re_tok += '|([\[\{\(])'
_re_tok += '|([\]\}\)])'
# 5,6: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 7: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 8: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=\\r?$))'
# 9: And finally, a single newline. The 10th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)'
# Match inline statements (may contain python strings)
_re_inl = '(?m)%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
_re_tok = '(?m)' + _re_tok
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # New escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+m.group(5)+line+sep)
self.offset += len(line+sep)+1
continue
elif m.group(5): # Old escape syntax
depr('Escape code lines with a backslash.') #0.12
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.') #0.12
if len(parts) == 1: return "_printlist([base])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.') #0.12
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
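# Usage sketch for the view decorator (the template name and variable are
# assumptions): returning a dict renders 'hello_template' with those values.
@view('hello_template')
def _view_example(name='World'):
    return {'name': name}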
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| apache-2.0 | -5,441,184,906,544,719,000 | 38.916556 | 103 | 0.578312 | false |
ajenhl/eats | server/eats/tests/views/test_entity_delete.py | 1 | 2674 | from django.core.urlresolvers import reverse
from eats.models import Entity
from eats.tests.views.view_test_case import ViewTestCase
class EntityDeleteViewTestCase (ViewTestCase):
def setUp (self):
super(EntityDeleteViewTestCase, self).setUp()
user = self.create_django_user('user', '[email protected]', 'password')
self.editor = self.create_user(user)
self.editor.editable_authorities = [self.authority]
self.editor.set_current_authority(self.authority)
def test_non_existent_entity (self):
url = reverse('entity-delete', kwargs={'entity_id': 0})
self.app.get(url, status=404, user='user')
def test_delete (self):
# Test for successful deletion of an entity associated with a
# single authority that the user is an editor for.
entity = self.tm.create_entity(self.authority)
self.assertEqual(Entity.objects.count(), 1)
url = reverse('entity-delete', kwargs={'entity_id': entity.get_id()})
form = self.app.get(url, user='user').forms['entity-delete-form']
response = form.submit('_delete')
self.assertRedirects(response, reverse('search'))
self.assertEqual(Entity.objects.count(), 0)
# Test for unsuccessful deletion of an entity associated with
# two authorities, one of which the user is not an editor for.
authority2 = self.tm.create_authority('Test2')
entity2 = self.tm.create_entity(self.authority)
entity2.create_note_property_assertion(authority2, 'Test note')
self.assertEqual(Entity.objects.count(), 1)
url = reverse('entity-delete', kwargs={'entity_id': entity2.get_id()})
response = self.app.get(url, user='user')
# There should be no delete form.
self.assertTrue('entity-delete-form' not in response.forms)
# A submission even without the form should fail.
self.csrf_checks = False
self._patch_settings()
self.renew_app()
response = self.app.post(url, {'_delete': 'Delete'}, user='user')
self.assertEqual(response.status_code, 200)
self.assertEqual(Entity.objects.count(), 1)
self.csrf_checks = True
self._patch_settings()
self.renew_app()
# Test that the delete succeeds when the user is made an
# editor for the second authority too.
self.editor.editable_authorities = [self.authority, authority2]
form = self.app.get(url, user='user').forms['entity-delete-form']
response = form.submit('_delete')
self.assertRedirects(response, reverse('search'))
self.assertEqual(Entity.objects.count(), 0)
| gpl-3.0 | -3,766,901,904,554,540,000 | 46.75 | 78 | 0.655572 | false |
Fokko/incubator-airflow | airflow/contrib/example_dags/example_winrm_operator.py | 1 | 2457 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This DAG will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example dag for using the WinRMOperator.
"""
from datetime import timedelta
import airflow
from airflow.contrib.hooks.winrm_hook import WinRMHook
from airflow.contrib.operators.winrm_operator import WinRMOperator
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
default_args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
with DAG(
dag_id='POC_winrm_parallel',
default_args=default_args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60)
) as dag:
cmd = 'ls -l'
run_this_last = DummyOperator(task_id='run_this_last')
winRMHook = WinRMHook(ssh_conn_id='ssh_POC1')
t1 = WinRMOperator(
task_id="wintask1",
command='ls -altr',
winrm_hook=winRMHook
)
t2 = WinRMOperator(
task_id="wintask2",
command='sleep 60',
winrm_hook=winRMHook
)
t3 = WinRMOperator(
task_id="wintask3",
command='echo \'luke test\' ',
winrm_hook=winRMHook
)
[t1, t2, t3] >> run_this_last
| apache-2.0 | -5,874,133,508,260,719,000 | 31.76 | 82 | 0.607245 | false |
mikewesner-wf/glasshouse | glasshouse.indigoPlugin/Contents/Server Plugin/werkzeug/testsuite/wsgi.py | 1 | 10308 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os import path
from cStringIO import StringIO
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield 'NOT FOUND'
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared')
})
for p in '/test.txt', '/sources/test.txt':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
self.assert_equal(status, '200 OK')
self.assert_equal(''.join(app_iter).strip(), 'FOUND')
app_iter, status, headers = run_wsgi_app(app, create_environ('/pkg/debugger.js'))
contents = ''.join(app_iter)
assert '$(function() {' in contents
app_iter, status, headers = run_wsgi_app(app, create_environ('/missing'))
self.assert_equal(status, '404 NOT FOUND')
self.assert_equal(''.join(app_iter).strip(), 'NOT FOUND')
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.org')
assert wsgi.get_host(create_environ('/', 'http://example.org')) \
== 'example.org'
def test_responder(self):
def foo(environ, start_response):
return BaseResponse('Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
self.assert_equal(response.status_code, 200)
self.assert_equal(response.data, 'Test')
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
self.assert_equal(env.get('SCRIPT_NAME'), script_name)
self.assert_equal(env.get('PATH_INFO'), path_info)
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
self.assert_equal(pop(), 'a')
assert_tuple('/foo/a', '/b///c')
self.assert_equal(pop(), 'b')
assert_tuple('/foo/a/b', '///c')
self.assert_equal(pop(), 'c')
assert_tuple('/foo/a/b///c', '')
assert pop() is None
def test_peek_path_info(self):
env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/aaa/b///c'}
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
self.assert_equal(stream.read(), '123')
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
self.assert_equal(stream.tell(), 0)
self.assert_equal(stream.read(1), '1')
self.assert_equal(stream.tell(), 1)
self.assert_equal(stream.read(1), '2')
self.assert_equal(stream.tell(), 2)
self.assert_equal(stream.read(1), '3')
self.assert_equal(stream.tell(), 3)
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readline(), '123456\n')
self.assert_equal(stream.readline(), 'ab')
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(), ['123456\n', 'ab'])
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(2), ['12'])
self.assert_equal(stream.readlines(2), ['34'])
self.assert_equal(stream.readlines(), ['56\n', 'ab'])
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readline(100), '123456\n')
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_equal(stream.readlines(100), ['123456\n', 'ab'])
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_equal(stream.read(1), '1')
self.assert_equal(stream.read(1), '2')
self.assert_equal(stream.read(), '3')
self.assert_equal(stream.read(), '')
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_equal(stream.read(-1), '123')
def test_limited_stream_disconnection(self):
io = StringIO('A bit of content')
# disconnect detection on out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because file close
io = StringIO('x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
self.assert_equal(x, u'/')
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
assert x is None
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
assert x is None
def test_get_host_fallback(self):
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}) == 'foobar.example.com'
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}) == 'foobar.example.com:81'
def test_get_current_url_unicode(self):
env = create_environ()
env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
rv = wsgi.get_current_url(env)
self.assertEqual(rv, 'http://localhost/?foo=bar&baz=blah&meh=%CF')
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
self.assert_equal(lines, ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz'])
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in xrange(1, 10):
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
assert lines == ['abc\r', 'def\r\n', 'ghi']
def test_iter_functions_support_iterators(self):
data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
lines = list(wsgi.make_line_iter(data))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
def test_make_chunk_iter(self):
data = ['abcdefXghi', 'jklXmnopqrstuvwxyzX', 'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, ['abcdef', 'ghijkl', 'mnopqrstuvwxyz', 'ABCDEFGHIJK'])
data = 'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = StringIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data), buffer_size=4))
self.assert_equal(rv, ['abcdef', 'ghijkl', 'mnopqrstuvwxyz', 'ABCDEFGHIJK'])
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in xrange(1, 15):
lines = list(wsgi.make_line_iter(StringIO(data), limit=len(data), buffer_size=4))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
| apache-2.0 | 2,790,075,410,506,452,500 | 39.301961 | 109 | 0.587331 | false |
romanarranz/PPR | P1/calculaGanancia.py | 1 | 2508 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import re
# If the program receives fewer than 3 arguments, or the first and second arguments are the same file
if len(argv) < 3 or argv[1] == argv[2]:
print "Error la sintaxis es:"
print "\t$",argv[0]," output/floydS.dat"," output/floyd1D.dat"
print "\t$",argv[0]," output/floydS.dat"," output/floyd2D.dat"
else:
archivoFloydS = argv[1]
archivoFloydP = argv[2]
flujoArchivoS = open(archivoFloydS)
flujoArchivoP = open(archivoFloydP)
	# create an empty dictionary
ganancia = {}
	# <== Collect the times from the sequential file
# =========================================>
print "Flujo de %r:" % archivoFloydS
	# For each line
for linea in flujoArchivoS:
		# build a list using the '\t' character as the delimiter
arrayLinea = re.split(r'\t+', linea.rstrip('\t'))
		# replace the newline with the empty string in every element of the list
arrayLinea = ([elemento.replace('\n', '') for elemento in arrayLinea])
if arrayLinea:
print "\tarrayLinea: ", arrayLinea
clave = int(arrayLinea[0])
ganancia[clave] = float(arrayLinea[1])
else:
print "\tNo match"
flujoArchivoS.close()
print ""
	# <== Collect the times from the parallel file
# =========================================>
print "Flujo de %r:" % archivoFloydP
	# For each line
for linea in flujoArchivoP:
		# build a list using the '\t' character as the delimiter
arrayLinea = re.split(r'\t+', linea.rstrip('\t'))
		# replace the newline with the empty string in every element of the list
arrayLinea = ([elemento.replace('\n', '') for elemento in arrayLinea])
if arrayLinea:
print "\tarrayLinea: ", arrayLinea
clave = int(arrayLinea[0])
			# divide the sequential time by the parallel time and store it as the dictionary value
ganancia[clave] = ganancia[clave]/float(arrayLinea[1])
else:
print "\tNo match"
flujoArchivoP.close()
print ""
	# <== Print the dictionary
# =========================================>
print "Diccionario ganancia"
for key, value in sorted(ganancia.iteritems()):
print "\t",key, value
	# <== Save the dictionary to a ganancia.dat file
# =========================================>
archivoSalida = "ganancia"+archivoFloydP[-6:]
flujoSalida = open("output/"+archivoSalida, 'w')
for key, value in sorted(ganancia.iteritems()):
linea = str(key)+'\t'+str(value)+'\n'
s = str(linea)
flujoSalida.write(s)
flujoSalida.close() | mit | -8,811,057,606,774,372,000 | 31.141026 | 112 | 0.647247 | false |
pernici/sympy | sympy/polys/tests/test_euclidtools.py | 1 | 20892 | """Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.polys.euclidtools import (
dup_gcdex, dup_half_gcdex, dup_invert,
dup_euclidean_prs, dmp_euclidean_prs,
dup_primitive_prs, dmp_primitive_prs,
dup_subresultants, dmp_subresultants,
dup_prs_resultant, dmp_prs_resultant,
dmp_zz_collins_resultant,
dmp_qq_collins_resultant,
dup_resultant, dmp_resultant,
dup_discriminant, dmp_discriminant,
dup_zz_heu_gcd, dmp_zz_heu_gcd,
dup_qq_heu_gcd, dmp_qq_heu_gcd,
dup_rr_prs_gcd, dmp_rr_prs_gcd,
dup_ff_prs_gcd, dmp_ff_prs_gcd,
dup_inner_gcd, dmp_inner_gcd,
dup_lcm, dmp_lcm,
dmp_content, dmp_primitive,
dup_cancel, dmp_cancel)
from sympy.polys.densebasic import (
dmp_one_p,
dup_LC, dmp_LC,
dup_normal, dmp_normal)
from sympy.polys.densearith import (
dup_add,
dup_mul, dmp_mul,
dup_exquo)
from sympy.polys.densetools import (
dup_diff)
from sympy.polys.specialpolys import (
f_4, f_5, f_6,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3)
from sympy.polys.domains import ZZ, QQ
def test_dup_gcdex():
f = dup_normal([1,-2,-6,12,15], QQ)
g = dup_normal([1,1,-4,-4], QQ)
s = [QQ(-1,5),QQ(3,5)]
t = [QQ(1,5),QQ(-6,5),QQ(2)]
h = [QQ(1),QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
f = dup_normal([1,4,0,-1,1], QQ)
g = dup_normal([1,0,-1,1], QQ)
s, t, h = dup_gcdex(f, g, QQ)
S, T, H = dup_gcdex(g, f, QQ)
assert dup_add(dup_mul(s, f, QQ),
dup_mul(t, g, QQ), QQ) == h
assert dup_add(dup_mul(S, g, QQ),
dup_mul(T, f, QQ), QQ) == H
f = dup_normal([2,0], QQ)
g = dup_normal([1,0,-16], QQ)
s = [QQ(1,32),QQ(0)]
t = [QQ(-1,16)]
h = [QQ(1)]
assert dup_half_gcdex(f, g, QQ) == (s, h)
assert dup_gcdex(f, g, QQ) == (s, t, h)
def test_dup_invert():
assert dup_invert([QQ(2),QQ(0)], [QQ(1),QQ(0),QQ(-16)], QQ) == [QQ(1,32),QQ(0)]
def test_dup_euclidean_prs():
f = QQ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = QQ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_euclidean_prs(f, g, QQ) == [f, g,
[-QQ(5,9), QQ(0,1), QQ(1,9), QQ(0,1), -QQ(1,3)],
[-QQ(117,25), -QQ(9,1), QQ(441,25)],
[QQ(233150,19773), -QQ(102500,6591)],
[-QQ(1288744821,543589225)]]
def test_dup_primitive_prs():
f = ZZ.map([1, 0, 1, 0, -3, -3, 8, 2, -5])
g = ZZ.map([3, 0, 5, 0, -4, -9, 21])
assert dup_primitive_prs(f, g, ZZ) == [f, g,
[-ZZ(5), ZZ(0), ZZ(1), ZZ(0), -ZZ(3)],
[ZZ(13), ZZ(25), -ZZ(49)],
[ZZ(4663), -ZZ(6150)],
[ZZ(1)]]
def test_dup_subresultants():
assert dup_resultant([], [], ZZ) == ZZ(0)
assert dup_resultant([ZZ(1)], [], ZZ) == ZZ(0)
assert dup_resultant([], [ZZ(1)], ZZ) == ZZ(0)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], ZZ)
g = dup_normal([3,0,5,0,-4,-9,21], ZZ)
a = dup_normal([15,0,-3,0,9], ZZ)
b = dup_normal([65,125,-245], ZZ)
c = dup_normal([9326,-12300], ZZ)
d = dup_normal([260708], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a, b, c, d]
assert dup_resultant(f, g, ZZ) == dup_LC(d, ZZ)
f = dup_normal([1,-2,1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([2,-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0, 1], ZZ)
g = dup_normal([1,0,-1], ZZ)
a = dup_normal([-2], ZZ)
assert dup_subresultants(f, g, ZZ) == [f, g, a]
assert dup_resultant(f, g, ZZ) == 4
f = dup_normal([1,0,-1], ZZ)
g = dup_normal([1,-1,0,2], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([3,0,-1,0], ZZ)
g = dup_normal([5,0,1], ZZ)
assert dup_resultant(f, g, ZZ) == 64
f = dup_normal([1,-2,7], ZZ)
g = dup_normal([1,0,-1,5], ZZ)
assert dup_resultant(f, g, ZZ) == 265
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-15,74,-120], ZZ)
assert dup_resultant(f, g, ZZ) == -8640
f = dup_normal([1,-6,11,-6], ZZ)
g = dup_normal([1,-10,29,-20], ZZ)
assert dup_resultant(f, g, ZZ) == 0
f = dup_normal([1,0,0,-1], ZZ)
g = dup_normal([1,2,2,-1], ZZ)
assert dup_resultant(f, g, ZZ) == 16
f = dup_normal([1,0,0,0,0,0,0,0,-2], ZZ)
g = dup_normal([1,-1], ZZ)
assert dup_resultant(f, g, ZZ) == -1
def test_dmp_subresultants():
assert dmp_resultant([[]], [[]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[ZZ(1)]], [[]], 1, ZZ) == []
assert dmp_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_prs_resultant([[]], [[ZZ(1)]], 1, ZZ)[0] == []
assert dmp_zz_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
assert dmp_qq_collins_resultant([[]], [[ZZ(1)]], 1, ZZ) == []
f = dmp_normal([[3,0],[],[-1,0,0,-4]], 1, ZZ)
g = dmp_normal([[1],[1,0,0,0],[-9]], 1, ZZ)
a = dmp_normal([[3,0,0,0,0],[1,0,-27,4]], 1, ZZ)
b = dmp_normal([[-3,0,0,-12,1,0,-54,8,729,-216,16]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a, b]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = dmp_normal([[-1],[],[],[5]], 1, ZZ)
g = dmp_normal([[3,1],[],[]], 1, ZZ)
a = dmp_normal([[45,30,5]], 1, ZZ)
b = dmp_normal([[675,675,225,25]], 1, ZZ)
r = dmp_LC(b, ZZ)
assert dmp_subresultants(f, g, 1, ZZ) == [f, g, a]
assert dmp_resultant(f, g, 1, ZZ) == r
assert dmp_prs_resultant(f, g, 1, ZZ)[0] == r
assert dmp_zz_collins_resultant(f, g, 1, ZZ) == r
assert dmp_qq_collins_resultant(f, g, 1, ZZ) == r
f = [[[[[6]]]], [[[[-3]]], [[[-2]], [[]]]], [[[[1]], [[]]], [[[]]]]]
g = [[[[[1]]]], [[[[-1], [-1, 0]]]], [[[[1, 0], []]]]]
r = [[[[1]], [[-3], [-3, 0]], [[9, 0], []]], [[[-2], [-2, 0]], [[6],
[12, 0], [6, 0, 0]], [[-18, 0], [-18, 0, 0], []]], [[[4, 0],
[]], [[-12, 0], [-12, 0, 0], []], [[36, 0, 0], [], []]]]
assert dmp_zz_collins_resultant(f, g, 4, ZZ) == r
f = [[[[[QQ(1,1)]]]], [[[[QQ(-1,2)]]], [[[QQ(-1,3)]], [[]]]], [[[[QQ(1,6)]], [[]]], [[[]]]]]
g = [[[[[QQ(1,1)]]]], [[[[QQ(-1,1)], [QQ(-1,1), QQ(0, 1)]]]], [[[[QQ(1,1), QQ(0,1)], []]]]]
r = [[[[QQ(1,36)]], [[QQ(-1,12)], [QQ(-1,12), QQ(0,1)]], [[QQ(1,4), QQ(0,1)], []]],
[[[QQ(-1,18)], [QQ(-1,18), QQ(0,1)]], [[QQ(1,6)], [QQ(1,3), QQ(0,1)], [QQ(1,6),
QQ(0,1), QQ(0,1)]], [[QQ(-1,2), QQ(0,1)], [QQ(-1,2), QQ(0,1), QQ(0,1)], []]],
[[[QQ(1,9), QQ(0,1)], []], [[QQ(-1,3), QQ(0,1)], [QQ(-1,3), QQ(0,1), QQ(0,1)], []],
[[QQ(1,1), QQ(0,1), QQ(0,1)], [], []]]]
assert dmp_qq_collins_resultant(f, g, 4, QQ) == r
def test_dup_discriminant():
assert dup_discriminant([], ZZ) == 0
assert dup_discriminant([1,0], ZZ) == 1
assert dup_discriminant([1,3,9,-13], ZZ) == -11664
assert dup_discriminant([5,0,1,0,0,2], ZZ) == 31252160
assert dup_discriminant([1,2,6,-22,13], ZZ) == 0
assert dup_discriminant([12,0,0,15,30,1,0,1], ZZ) == -220289699947514112
def test_dmp_discriminant():
assert dmp_discriminant([], 0, ZZ) == 0
assert dmp_discriminant([[]], 1, ZZ) == []
assert dmp_discriminant([[1,0]], 1, ZZ) == []
assert dmp_discriminant([1,3,9,-13], 0, ZZ) == -11664
assert dmp_discriminant([5,0,1,0,0,2], 0, ZZ) == 31252160
assert dmp_discriminant([1,2,6,-22,13], 0, ZZ) == 0
assert dmp_discriminant([12,0,0,15,30,1,0,1], 0, ZZ) == -220289699947514112
assert dmp_discriminant([[1,0],[],[2,0]], 1, ZZ) == [-8,0,0]
assert dmp_discriminant([[1,0,2],[]], 1, ZZ) == [1]
assert dmp_discriminant([[[1],[]],[[1,0]]], 2, ZZ) == [[1]]
assert dmp_discriminant([[[[1]],[[]]],[[[1],[]]],[[[1,0]]]], 3, ZZ) == \
[[[-4, 0]], [[1], [], []]]
assert dmp_discriminant([[[[[1]]],[[[]]]],[[[[1]],[[]]]],[[[[1],[]]]],[[[[1,0]]]]], 4, ZZ) == \
[[[[-27,0,0]]],[[[18,0],[]],[[-4],[],[],[]]],[[[-4,0]],[[1],[],[]],[[]],[[]]]]
def test_dup_gcd():
assert dup_zz_heu_gcd([], [], ZZ) == ([], [], [])
assert dup_rr_prs_gcd([], [], ZZ) == ([], [], [])
assert dup_zz_heu_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_rr_prs_gcd([2], [], ZZ) == ([2], [1], [])
assert dup_zz_heu_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_rr_prs_gcd([-2], [], ZZ) == ([2], [-1], [])
assert dup_zz_heu_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_rr_prs_gcd([], [-2], ZZ) == ([2], [], [-1])
assert dup_zz_heu_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_rr_prs_gcd([], [2,4], ZZ) == ([2,4], [], [1])
assert dup_zz_heu_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_rr_prs_gcd([2,4], [], ZZ) == ([2,4], [1], [])
assert dup_zz_heu_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_rr_prs_gcd([2], [2], ZZ) == ([2], [1], [1])
assert dup_zz_heu_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_rr_prs_gcd([-2], [2], ZZ) == ([2], [-1], [1])
assert dup_zz_heu_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_rr_prs_gcd([2], [-2], ZZ) == ([2], [1], [-1])
assert dup_zz_heu_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_rr_prs_gcd([-2], [-2], ZZ) == ([2], [-1], [-1])
assert dup_zz_heu_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_rr_prs_gcd([1,2,1], [1], ZZ) == ([1], [1, 2, 1], [1])
assert dup_zz_heu_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_rr_prs_gcd([1,2,1], [2], ZZ) == ([1], [1, 2, 1], [2])
assert dup_zz_heu_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_rr_prs_gcd([2,4,2], [2], ZZ) == ([2], [1, 2, 1], [1])
assert dup_zz_heu_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_rr_prs_gcd([2], [2,4,2], ZZ) == ([2], [1], [1, 2, 1])
assert dup_zz_heu_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_rr_prs_gcd([2,4,2], [1,1], ZZ) == ([1, 1], [2, 2], [1])
assert dup_zz_heu_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
assert dup_rr_prs_gcd([1,1], [2,4,2], ZZ) == ([1, 1], [1], [2, 2])
f, g = [1, -31], [1, 0]
assert dup_zz_heu_gcd(f, g, ZZ) == ([1], f, g)
assert dup_rr_prs_gcd(f, g, ZZ) == ([1], f, g)
f = [1,8,21,22,8]
g = [1,6,11,6]
h = [1,3,2]
cff = [1,5,4]
cfg = [1,3]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,0,0,-4]
g = [1,0,4,0, 4]
h = [1,0,2]
cff = [1,0,-2]
cfg = [1,0, 2]
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = [1,0,1,0,-3,-3,8,2,-5]
g = [3,0,5,-0,-4,-9,21]
h = [1]
cff = f
cfg = g
assert dup_zz_heu_gcd(f, g, ZZ) == (h, cff, cfg)
assert dup_rr_prs_gcd(f, g, ZZ) == (h, cff, cfg)
f = dup_normal([1,0,1,0,-3,-3,8,2,-5], QQ)
g = dup_normal([3,0,5,-0,-4,-9,21], QQ)
h = dup_normal([1], QQ)
assert dup_qq_heu_gcd(f, g, QQ) == (h, cff, cfg)
assert dup_ff_prs_gcd(f, g, QQ) == (h, cff, cfg)
f = [-352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272,
0, 0, 0, 0, 0, 0,
46818041807522713962450042363465092040687472354933295397472942006618953623327997952,
0, 0, 0, 0, 0, 0,
378182690892293941192071663536490788434899030680411695933646320291525827756032,
0, 0, 0, 0, 0, 0,
112806468807371824947796775491032386836656074179286744191026149539708928,
0, 0, 0, 0, 0, 0,
-12278371209708240950316872681744825481125965781519138077173235712,
0, 0, 0, 0, 0, 0,
289127344604779611146960547954288113529690984687482920704,
0, 0, 0, 0, 0, 0,
19007977035740498977629742919480623972236450681,
0, 0, 0, 0, 0, 0,
311973482284542371301330321821976049]
g = [365431878023781158602430064717380211405897160759702125019136,
0, 0, 0, 0, 0, 0,
197599133478719444145775798221171663643171734081650688,
0, 0, 0, 0, 0, 0,
-9504116979659010018253915765478924103928886144,
0, 0, 0, 0, 0, 0,
-311973482284542371301330321821976049]
f = dup_normal(f, ZZ)
g = dup_normal(g, ZZ)
assert dup_zz_heu_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
assert dup_rr_prs_gcd(f, dup_diff(f, 1, ZZ), ZZ)[0] == g
f = [QQ(1,2),QQ(1),QQ(1,2)]
g = [QQ(1,2),QQ(1,2)]
h = [QQ(1), QQ(1)]
assert dup_qq_heu_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
assert dup_ff_prs_gcd(f, g, QQ) == (h, g, [QQ(1,2)])
def test_dmp_gcd():
assert dmp_zz_heu_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_rr_prs_gcd([[]], [[]], 1, ZZ) == ([[]], [[]], [[]])
assert dmp_zz_heu_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2]], [[]], 1, ZZ) == ([[2]], [[1]], [[]])
assert dmp_zz_heu_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_rr_prs_gcd([[-2]], [[]], 1, ZZ) == ([[2]], [[-1]], [[]])
assert dmp_zz_heu_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_rr_prs_gcd([[]], [[-2]], 1, ZZ) == ([[2]], [[]], [[-1]])
assert dmp_zz_heu_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_rr_prs_gcd([[]], [[2],[4]], 1, ZZ) == ([[2],[4]], [[]], [[1]])
assert dmp_zz_heu_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_rr_prs_gcd([[2],[4]], [[]], 1, ZZ) == ([[2],[4]], [[1]], [[]])
assert dmp_zz_heu_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_rr_prs_gcd([[2]], [[2]], 1, ZZ) == ([[2]], [[1]], [[1]])
assert dmp_zz_heu_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_rr_prs_gcd([[-2]], [[2]], 1, ZZ) == ([[2]], [[-1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_rr_prs_gcd([[2]], [[-2]], 1, ZZ) == ([[2]], [[1]], [[-1]])
assert dmp_zz_heu_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_rr_prs_gcd([[-2]], [[-2]], 1, ZZ) == ([[2]], [[-1]], [[-1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[1]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_rr_prs_gcd([[1],[2],[1]], [[2]], 1, ZZ) == ([[1]], [[1], [2], [1]], [[2]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[2]], 1, ZZ) == ([[2]], [[1], [2], [1]], [[1]])
assert dmp_zz_heu_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_rr_prs_gcd([[2]], [[2],[4],[2]], 1, ZZ) == ([[2]], [[1]], [[1], [2], [1]])
assert dmp_zz_heu_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_rr_prs_gcd([[2],[4],[2]], [[1],[1]], 1, ZZ) == ([[1], [1]], [[2], [2]], [[1]])
assert dmp_zz_heu_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_rr_prs_gcd([[1],[1]], [[2],[4],[2]], 1, ZZ) == ([[1], [1]], [[1]], [[2], [2]])
assert dmp_zz_heu_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
assert dmp_rr_prs_gcd([[[[1,2,1]]]], [[[[2,2]]]], 3, ZZ) == ([[[[1,1]]]], [[[[1,1]]]], [[[[2]]]])
f, g = [[[[1,2,1],[1,1],[]]]], [[[[1,2,1]]]]
h, cff, cfg = [[[[1,1]]]], [[[[1,1],[1],[]]]], [[[[1,1]]]]
assert dmp_zz_heu_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_rr_prs_gcd(f, g, 3, ZZ) == (h, cff, cfg)
assert dmp_zz_heu_gcd(g, f, 3, ZZ) == (h, cfg, cff)
assert dmp_rr_prs_gcd(g, f, 3, ZZ) == (h, cfg, cff)
f, g, h = dmp_fateman_poly_F_1(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(4, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(6, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 6, ZZ)
assert H == h and dmp_mul(H, cff, 6, ZZ) == f \
and dmp_mul(H, cfg, 6, ZZ) == g
f, g, h = dmp_fateman_poly_F_1(8, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 8, ZZ)
assert H == h and dmp_mul(H, cff, 8, ZZ) == f \
and dmp_mul(H, cfg, 8, ZZ) == g
f, g, h = dmp_fateman_poly_F_2(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(2, ZZ)
H, cff, cfg = dmp_zz_heu_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
H, cff, cfg = dmp_rr_prs_gcd(f, g, 2, ZZ)
assert H == h and dmp_mul(H, cff, 2, ZZ) == f \
and dmp_mul(H, cfg, 2, ZZ) == g
f, g, h = dmp_fateman_poly_F_3(4, ZZ)
H, cff, cfg = dmp_inner_gcd(f, g, 4, ZZ)
assert H == h and dmp_mul(H, cff, 4, ZZ) == f \
and dmp_mul(H, cfg, 4, ZZ) == g
f = [[QQ(1,2)],[QQ(1)],[QQ(1,2)]]
g = [[QQ(1,2)],[QQ(1,2)]]
h = [[QQ(1)],[QQ(1)]]
assert dmp_qq_heu_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
assert dmp_ff_prs_gcd(f, g, 1, QQ) == (h, g, [[QQ(1,2)]])
def test_dup_lcm():
assert dup_lcm([2], [6], ZZ) == [6]
assert dup_lcm([2,0,0,0], [6,0], ZZ) == [6,0,0,0]
assert dup_lcm([2,0,0,0], [3,0], ZZ) == [6,0,0,0]
assert dup_lcm([1,1,0], [1,0], ZZ) == [1,1,0]
assert dup_lcm([1,1,0], [2,0], ZZ) == [2,2,0]
assert dup_lcm([1,2,0], [1,0], ZZ) == [1,2,0]
assert dup_lcm([2,1,0], [1,0], ZZ) == [2,1,0]
assert dup_lcm([2,1,0], [2,0], ZZ) == [4,2,0]
def test_dmp_lcm():
assert dmp_lcm([[2]], [[6]], 1, ZZ) == [[6]]
assert dmp_lcm([[1],[]], [[1,0]], 1, ZZ) == [[1,0],[]]
assert dmp_lcm([[2],[],[],[]], [[6,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[2],[],[],[]], [[3,0,0],[]], 1, ZZ) == [[6,0,0],[],[],[]]
assert dmp_lcm([[1,0],[],[]], [[1,0,0],[]], 1, ZZ) == [[1,0,0],[],[]]
f = [[2,-3,-2,3,0,0],[]]
g = [[1,0,-2,0,1,0]]
h = [[2,-3,-4,6,2,-3,0,0],[]]
assert dmp_lcm(f, g, 1, ZZ) == h
f = [[1],[-3,0],[-9,0,0],[-5,0,0,0]]
g = [[1],[6,0],[12,0,0],[10,0,0,0],[3,0,0,0,0]]
h = [[1],[1,0],[-18,0,0],[-50,0,0,0],[-47,0,0,0,0],[-15,0,0,0,0,0]]
assert dmp_lcm(f, g, 1, ZZ) == h
def test_dmp_content():
assert dmp_content([[-2]], 1, ZZ) == [2]
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in xrange(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_content(F, 1, ZZ) == f
assert dmp_one_p(dmp_content(f_4, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_5, 2, ZZ), 1, ZZ)
assert dmp_one_p(dmp_content(f_6, 3, ZZ), 2, ZZ)
def test_dmp_primitive():
assert dmp_primitive([[]], 1, ZZ) == ([], [[]])
assert dmp_primitive([[1]], 1, ZZ) == ([1], [[1]])
f, g, F = [ZZ(3),ZZ(2),ZZ(1)], [ZZ(1)], []
for i in xrange(0, 5):
g = dup_mul(g, f, ZZ)
F.insert(0, g)
assert dmp_primitive(F, 1, ZZ) == (f,
[ dup_exquo(c, f, ZZ) for c in F ])
cont, f = dmp_primitive(f_4, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_4
cont, f = dmp_primitive(f_5, 2, ZZ)
assert dmp_one_p(cont, 1, ZZ) and f == f_5
cont, f = dmp_primitive(f_6, 3, ZZ)
assert dmp_one_p(cont, 2, ZZ) and f == f_6
def test_dup_cancel():
f = ZZ.map([2, 0, -2])
g = ZZ.map([1, -2, 1])
p = [ZZ(2), ZZ(2)]
q = [ZZ(1), -ZZ(1)]
assert dup_cancel(f, g, ZZ) == (p, q)
assert dup_cancel(f, g, ZZ, multout=False) == (ZZ(1), ZZ(1), p, q)
f = [-ZZ(1),-ZZ(2)]
g = [ ZZ(3),-ZZ(4)]
F = [ ZZ(1), ZZ(2)]
G = [-ZZ(3), ZZ(4)]
dup_cancel(f, g, ZZ) == (f, g)
dup_cancel(F, G, ZZ) == (f, g)
def test_dmp_cancel():
f = ZZ.map([[2], [0], [-2]])
g = ZZ.map([[1], [-2], [1]])
p = [[ZZ(2)], [ZZ(2)]]
q = [[ZZ(1)], [-ZZ(1)]]
assert dmp_cancel(f, g, 1, ZZ) == (p, q)
assert dmp_cancel(f, g, 1, ZZ, multout=False) == (ZZ(1), ZZ(1), p, q)
| bsd-3-clause | 8,572,424,864,415,290,000 | 32.970732 | 101 | 0.448736 | false |
botswana-harvard/eit | eit/config/urls.py | 1 | 3578 | from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.views.generic import RedirectView
from django.db.models import get_models
import django_databrowse
from edc.subject.rule_groups.classes import site_rule_groups
from edc.subject.lab_tracker.classes import site_lab_tracker
from edc.dashboard.section.classes import site_sections
from edc.subject.visit_schedule.classes import site_visit_schedules
from edc.lab.lab_profile.classes import site_lab_profiles
from edc.dashboard.subject.views import additional_requisition
from django.contrib import admin
admin.autodiscover()
site_lab_profiles.autodiscover()
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
from eit.apps.eit.eit_app_configuration.classes import EitAppConfiguration
EitAppConfiguration().prepare()
site_visit_schedules.autodiscover()
site_visit_schedules.build_all()
site_rule_groups.autodiscover()
site_lab_tracker.autodiscover()
# data_manager.prepare()
site_sections.autodiscover()
site_sections.update_section_lists()
APP_NAME = settings.APP_NAME
for model in get_models():
try:
django_databrowse.site.register(model)
except:
pass
urlpatterns = patterns(
'',
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/logout/$', RedirectView.as_view(url='/{app_name}/logout/'.format(app_name=APP_NAME))),
(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns(
'',
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
)
# this is for additional_requisitions
urlpatterns += patterns(
'',
url(r'^{app_name}/dashboard/visit/add_requisition/'.format(app_name=APP_NAME),
additional_requisition,
name="add_requisition"),
)
urlpatterns += patterns(
'',
url(r'^databrowse/(.*)', login_required(django_databrowse.site.root)),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/section/labclinic/'.format(app_name=APP_NAME),
include('edc.lab.lab_clinic_api.urls'), name="section_url_name"),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/dashboard/'.format(app_name=APP_NAME),
include('eit.apps.{app_name}_dashboard.urls'.format(app_name=APP_NAME))),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/login/'.format(app_name=APP_NAME),
'django.contrib.auth.views.login',
name='{app_name}_login'.format(app_name=APP_NAME)),
url(r'^{app_name}/logout/'.format(app_name=APP_NAME),
'django.contrib.auth.views.logout_then_login',
name='{app_name}_logout'.format(app_name=APP_NAME)),
url(r'^{app_name}/password_change/'.format(app_name=APP_NAME),
'django.contrib.auth.views.password_change',
name='password_change_url'.format(app_name=APP_NAME)),
url(r'^{app_name}/password_change_done/'.format(app_name=APP_NAME),
'django.contrib.auth.views.password_change_done',
name='password_change_done'.format(app_name=APP_NAME)),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/section/'.format(app_name=APP_NAME), include('edc.dashboard.section.urls'), name='section'),
)
urlpatterns += patterns(
'',
url(r'^{app_name}/$'.format(app_name=APP_NAME),
RedirectView.as_view(url='/{app_name}/section/'.format(app_name=APP_NAME))),
url(r'', RedirectView.as_view(url='/{app_name}/section/'.format(app_name=APP_NAME))),
)
urlpatterns += staticfiles_urlpatterns()
| gpl-3.0 | -2,307,491,408,388,197,000 | 32.754717 | 114 | 0.707658 | false |
vaginessa/Pentesting-Scripts | InSpy.py | 1 | 8805 | #!/usr/bin/python
# InSpy - A LinkedIn employee enumerator
# This script enumerates employees from any organization
# using LinkedIn. Please note that this will not harvest all
# employees within a given organization.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Jonathan Broche
# Contact: @g0jhonny
# Version: 1.0
# Date: 2015-08-10
#
# usage: ./inspy.py -c <company> [-d dept/title] [-e email output format] [-i input file with dept/titles] [-o output file]
# example: ./inspy.py -c abc -e [email protected] -o abc_employees.txt
import requests, BeautifulSoup, argparse, signal, time, datetime, os
start_time = time.time()
class colors:
lightblue = "\033[1;36m"
blue = "\033[1;34m"
normal = "\033[0;00m"
red = "\033[1;31m"
yellow = "\033[1;33m"
white = "\033[1;37m"
green = "\033[1;32m"
#----------------------------------------#
# HARVEST USERS #
#----------------------------------------#
def inspy_enum(company, dept, ifile):
try:
dept_dictionary = ['sales', 'hr', 'marketing', 'finance', 'accounting', 'director', 'administrative', 'lawyer', 'it', 'security']
employees = {}
if dept is not None:
dept_dictionary = [dept.lower()]
if ifile is not None:
try:
if os.path.exists(ifile):
with open(ifile, 'r') as f:
dept_dictionary = []
for line in f.readlines():
if line.rstrip():
dept_dictionary.append(line.rstrip())
except IOError as e:
print "{}[!]{} Problem opening the file. {}".format(e)
for dd in dept_dictionary:
print "{}[*]{} Searching for employees working at {} with '{}' in their title".format(colors.lightblue, colors.normal, company, dd)
try:
	                # LinkedIn title URLs are dash-separated, so normalize spaces to dashes
	                response = requests.get('https://www.linkedin.com/title/{}-at-{}'.format(dd.replace(' ', '-'), company.replace(' ', '-')), timeout=2)
if response.status_code == 200:
soup = BeautifulSoup.BeautifulSoup(response.text)
else:
raise Exception
	            except requests.exceptions.Timeout:
	                print "{}[!]{} Timeout enumerating the {} department".format(colors.red, colors.normal, dd)
	                continue  # skip this department so 'soup' is never used after a failed request
	            except requests.exceptions.ConnectionError:
	                print "{}[!]{} Connection error.".format(colors.red, colors.normal)
	                continue
	            except requests.exceptions.HTTPError:
	                print "{}[!]{} HTTP error.".format(colors.red, colors.normal)
	                continue
#get employee names
for n, t in zip(soup.findAll('h3', { "class" : "name" }), soup.findAll('p', { "class" : "headline" })):
name = u''.join(n.getText()).encode('utf-8')
title = u''.join(t.getText()).encode('utf-8')
if not name in employees:
employees[name] = title
return employees
except Exception as e:
print "{}[!]{} Error harvesting users. {}".format(colors.red, colors.normal, e)
#----------------------------------------#
# EMAILS #
#----------------------------------------#
def format_email(names, eformat):
emails = []
for name in names:
spaces = []
for x,y in enumerate(name):
if ' ' in y:
spaces.append(x)
if eformat[:eformat.find('@')] == 'flast':
emails.append('{}{}{}'.format(name[0], name[(spaces[-1]+1):], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'lfirst':
emails.append('{}{}{}'.format(name[spaces[-1]+1], name[0:spaces[0]], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'first.last':
emails.append('{}.{}{}'.format(name[0:spaces[0]], name[(spaces[-1]+1):], eformat[eformat.find('@'):]))
elif eformat[:eformat.find('@')] == 'last.first':
emails.append('{}.{}{}'.format(name[(spaces[-1]+1):], name[0:spaces[0]], eformat[eformat.find('@'):]))
return [e.lower() for e in emails]
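# Illustrative sketch (not part of the original script): for a hypothetical harvested
# name 'Jane Doe', format_email produces lowercase addresses such as:
#   format_email(['Jane Doe'], '[email protected]')   -> ['[email protected]']
#   format_email(['Jane Doe'], '[email protected]') -> ['[email protected]']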
#----------------------------------------#
# OUTPUT #
#----------------------------------------#
def output(employees, email, company, ofile):
counter = 0
ge, be = {}, {}
print '\n'
if email:
for k, e in zip(employees, email):
if company in employees[k].lower():
if ',' in k:
be[e] = '{}, {}'.format(k, employees[k])
else:
ge[e] = '{}, {}'.format(k, employees[k])
	                print "{}[*]{} {}, {}, {}".format(colors.green, colors.normal, k.replace('&amp;', '&'), employees[k].replace('&amp;', '&'), e)
counter +=1
else:
for k in employees:
if company in employees[k].lower():
ge[k] = employees[k]
	                print "{}[*]{} {} {}".format(colors.green, colors.normal, k.replace('&amp;', '&'), employees[k].replace('&amp;', '&'))
counter +=1
if be:
print "\n{}[!]{} The following employees have commas in their names. Their emails were not accurate.".format(colors.red, colors.normal)
for k in be:
print "{}[*]{} {}".format(colors.yellow, colors.normal, be[k])
if ofile:
with open(ofile, 'w') as f:
f.write("\n" + "-" * 69 + "\n" + "InSpy Output" + "\n" + "-" * 69 + "\n\n")
if [e for e in ge.keys() if '@' in e]: #if emails in keys
f.write("\n" + "E-mails" + "\n" + "-" * 25 + "\n\n")
for k in ge.keys():
f.write(k+'\n')
f.write("\n" + "All" + "\n" + "-" * 25 + "\n\n")
for k in ge:
f.write('{}, {}\n'.format(ge[k], k))
else:
for k in ge:
f.write('{}, {}\n'.format(k, ge[k]))
print "\n{}[*]{} Done! {}{}{} employees found.".format(colors.lightblue, colors.normal, colors.green, counter, colors.normal)
print "{}[*]{} Completed in {:.1f}s\n".format(colors.lightblue, colors.normal, time.time()-start_time)
#----------------------------------------#
# MAIN #
#----------------------------------------#
def main():
print "\n " + "-" * 74 + "\n " + colors.white + "InSpy v1.0 - LinkedIn Employee Enumerator by Jonathan Broche (@g0jhonny)\n " + colors.normal + "-" * 74 + "\n "
parser = argparse.ArgumentParser(description='InSpy - A LinkedIn employee enumerator by Jonathan Broche (@g0jhonny)')
parser.add_argument('-c', '--company', required=True, help='Company name')
parser.add_argument('-d', '--dept', nargs='?', const='', help='Department or title to query employees against. Inspy searches through a predefined list by default.')
parser.add_argument('-e', '--emailformat', help='Email output format. Acceptable formats: [email protected], [email protected], [email protected], [email protected]')
parser.add_argument('-i', '--inputfilename', nargs='?', const='', help='File with list of departments or titles to query employees against (one item per line)')
parser.add_argument('-o', '--outfilename', nargs='?', const='', help='Output results to text file')
args = parser.parse_args()
employees = inspy_enum(args.company, args.dept, args.inputfilename)
if args.emailformat:
if args.emailformat.find('@') and args.emailformat[:args.emailformat.find('@')] in {'flast', 'lfirst', 'first.last', 'last.first'}:
e = format_email(employees.keys(), args.emailformat)
output(employees, e,args.company.lower(), args.outfilename)
else:
print "{}[!]{} Please provide a valid email address format (i.e., [email protected], [email protected], [email protected], [email protected])".format(colors.red, colors.normal)
else:
if employees is not None:
output(employees,'',args.company.lower(), args.outfilename)
if __name__ == '__main__':
main() | gpl-3.0 | -8,991,955,847,205,722,000 | 44.864583 | 183 | 0.527655 | false |
dbrgn/RPLCD | RPLCD/lcd.py | 1 | 16418 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013-2018 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from collections import namedtuple
from . import codecs
from . import common as c
from .compat import range
LCDConfig = namedtuple('LCDConfig', 'rows cols dotsize')
# # # MAIN # # #
class BaseCharLCD(object):
# Init, setup, teardown
def __init__(self, cols=20, rows=4, dotsize=8, charmap='A02', auto_linebreaks=True):
"""
Character LCD controller. Base class only, you should use a subclass.
Args:
cols:
Number of columns per row (usually 16 or 20). Default 20.
rows:
Number of display rows (usually 1, 2 or 4). Default: 4.
dotsize:
Some 1 line displays allow a font height of 10px.
Allowed: 8 or 10. Default: 8.
charmap:
The character map used. Depends on your LCD. This must be
either ``A00`` or ``A02`` or ``ST0B``. Default: ``A02``.
auto_linebreaks:
Whether or not to automatically insert line breaks.
Default: True.
"""
assert dotsize in [8, 10], 'The ``dotsize`` argument should be either 8 or 10.'
# Initialize codec
if charmap == 'A00':
self.codec = codecs.A00Codec()
elif charmap == 'A02':
self.codec = codecs.A02Codec()
elif charmap == 'ST0B':
self.codec = codecs.ST0BCodec()
else:
raise ValueError(
'The ``charmap`` argument must be either ``A00`` or ``A02`` or ``ST0B``')
# LCD configuration
self.lcd = LCDConfig(rows=rows, cols=cols, dotsize=dotsize)
# Setup initial display configuration
displayfunction = self.data_bus_mode | c.LCD_5x8DOTS
if rows == 1:
displayfunction |= c.LCD_1LINE
elif rows in [2, 4]:
# LCD only uses two lines on 4 row displays
displayfunction |= c.LCD_2LINE
if dotsize == 10:
# For some 1 line displays you can select a 10px font.
displayfunction |= c.LCD_5x10DOTS
# Create content cache
self._content = [[0x20] * cols for _ in range(rows)]
# Set up auto linebreaks
self.auto_linebreaks = auto_linebreaks
self.recent_auto_linebreak = False
# Initialize display
self._init_connection()
# Choose 4 or 8 bit mode
if self.data_bus_mode == c.LCD_4BITMODE:
# Hitachi manual page 46
self.command(0x03)
c.msleep(4.5)
self.command(0x03)
c.msleep(4.5)
self.command(0x03)
c.usleep(100)
self.command(0x02)
elif self.data_bus_mode == c.LCD_8BITMODE:
# Hitachi manual page 45
self.command(0x30)
c.msleep(4.5)
self.command(0x30)
c.usleep(100)
self.command(0x30)
else:
raise ValueError('Invalid data bus mode: {}'.format(self.data_bus_mode))
# Write configuration to display
self.command(c.LCD_FUNCTIONSET | displayfunction)
c.usleep(50)
# Configure display mode
self._display_mode = c.LCD_DISPLAYON
self._cursor_mode = c.CursorMode.hide
self.command(c.LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
c.usleep(50)
# Clear display
self.clear()
# Configure entry mode
self._text_align_mode = c.Alignment.left
self._display_shift_mode = c.ShiftMode.cursor
self._cursor_pos = (0, 0)
self.command(c.LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
c.usleep(50)
def close(self, clear=False):
if clear:
self.clear()
self._close_connection()
# Properties
def _get_cursor_pos(self):
return self._cursor_pos
def _set_cursor_pos(self, value):
if not hasattr(value, '__getitem__') or len(value) != 2:
raise ValueError('Cursor position should be determined by a 2-tuple.')
if value[0] not in range(self.lcd.rows) or value[1] not in range(self.lcd.cols):
msg = 'Cursor position {pos!r} invalid on a {lcd.rows}x{lcd.cols} LCD.'
raise ValueError(msg.format(pos=value, lcd=self.lcd))
row_offsets = [0x00, 0x40, self.lcd.cols, 0x40 + self.lcd.cols]
self._cursor_pos = value
self.command(c.LCD_SETDDRAMADDR | row_offsets[value[0]] + value[1])
c.usleep(50)
cursor_pos = property(_get_cursor_pos, _set_cursor_pos,
doc='The cursor position as a 2-tuple (row, col).')
def _get_text_align_mode(self):
if self._text_align_mode == c.Alignment.left:
return 'left'
elif self._text_align_mode == c.Alignment.right:
return 'right'
else:
raise ValueError('Internal _text_align_mode has invalid value.')
def _set_text_align_mode(self, value):
if value == 'left':
self._text_align_mode = c.Alignment.left
elif value == 'right':
self._text_align_mode = c.Alignment.right
else:
raise ValueError('Text align mode must be either `left` or `right`')
self.command(c.LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
c.usleep(50)
text_align_mode = property(_get_text_align_mode, _set_text_align_mode,
doc='The text alignment (``left`` or ``right``).')
def _get_write_shift_mode(self):
if self._display_shift_mode == c.ShiftMode.cursor:
return 'cursor'
elif self._display_shift_mode == c.ShiftMode.display:
return 'display'
else:
raise ValueError('Internal _display_shift_mode has invalid value.')
def _set_write_shift_mode(self, value):
if value == 'cursor':
self._display_shift_mode = c.ShiftMode.cursor
elif value == 'display':
self._display_shift_mode = c.ShiftMode.display
else:
raise ValueError('Write shift mode must be either `cursor` or `display`.')
self.command(c.LCD_ENTRYMODESET | self._text_align_mode | self._display_shift_mode)
c.usleep(50)
write_shift_mode = property(_get_write_shift_mode, _set_write_shift_mode,
doc='The shift mode when writing (``cursor`` or ``display``).')
def _get_display_enabled(self):
return self._display_mode == c.LCD_DISPLAYON
def _set_display_enabled(self, value):
self._display_mode = c.LCD_DISPLAYON if value else c.LCD_DISPLAYOFF
self.command(c.LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
c.usleep(50)
display_enabled = property(_get_display_enabled, _set_display_enabled,
doc='Whether or not to display any characters.')
def _get_cursor_mode(self):
if self._cursor_mode == c.CursorMode.hide:
return 'hide'
elif self._cursor_mode == c.CursorMode.line:
return 'line'
elif self._cursor_mode == c.CursorMode.blink:
return 'blink'
else:
raise ValueError('Internal _cursor_mode has invalid value.')
def _set_cursor_mode(self, value):
if value == 'hide':
self._cursor_mode = c.CursorMode.hide
elif value == 'line':
self._cursor_mode = c.CursorMode.line
elif value == 'blink':
self._cursor_mode = c.CursorMode.blink
else:
raise ValueError('Cursor mode must be one of `hide`, `line` or `blink`.')
self.command(c.LCD_DISPLAYCONTROL | self._display_mode | self._cursor_mode)
c.usleep(50)
cursor_mode = property(_get_cursor_mode, _set_cursor_mode,
doc='How the cursor should behave (``hide``, ``line`` or ``blink``).')
# High level commands
def write_string(self, value):
"""
Write the specified unicode string to the display.
To control multiline behavior, use newline (``\\n``) and carriage
return (``\\r``) characters.
Lines that are too long automatically continue on next line, as long as
``auto_linebreaks`` has not been disabled.
Make sure that you're only passing unicode objects to this function.
The unicode string is then converted to the correct LCD encoding by
using the charmap specified at instantiation time.
If you're dealing with bytestrings (the default string type in Python
2), convert it to a unicode object using the ``.decode(encoding)``
method and the appropriate encoding. Example for UTF-8 encoded strings:
.. code::
            >>> bstring = 'Temperature: 30°C'
>>> bstring
'Temperature: 30\xc2\xb0C'
>>> bstring.decode('utf-8')
u'Temperature: 30\xb0C'
"""
encoded = self.codec.encode(value) # type: List[int]
ignored = False
for [char, lookahead] in c.sliding_window(encoded, lookahead=1):
# If the previous character has been ignored, skip this one too.
if ignored is True:
ignored = False
continue
# Write regular chars
if char not in [codecs.CR, codecs.LF]:
self.write(char)
continue
# We're now left with only CR and LF characters. If an auto
# linebreak happened recently, and the lookahead matches too,
# ignore this write.
if self.recent_auto_linebreak is True:
crlf = (char == codecs.CR and lookahead == codecs.LF)
lfcr = (char == codecs.LF and lookahead == codecs.CR)
if crlf or lfcr:
ignored = True
continue
# Handle newlines and carriage returns
row, col = self.cursor_pos
if char == codecs.LF:
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, col)
else:
self.cursor_pos = (0, col)
elif char == codecs.CR:
if self.text_align_mode == 'left':
self.cursor_pos = (row, 0)
else:
self.cursor_pos = (row, self.lcd.cols - 1)
def clear(self):
"""Overwrite display with blank characters and reset cursor position."""
self.command(c.LCD_CLEARDISPLAY)
self._cursor_pos = (0, 0)
self._content = [[0x20] * self.lcd.cols for _ in range(self.lcd.rows)]
c.msleep(2)
def home(self):
"""Set cursor to initial position and reset any shifting."""
self.command(c.LCD_RETURNHOME)
self._cursor_pos = (0, 0)
c.msleep(2)
def shift_display(self, amount):
"""Shift the display. Use negative amounts to shift left and positive
amounts to shift right."""
if amount == 0:
return
direction = c.LCD_MOVERIGHT if amount > 0 else c.LCD_MOVELEFT
for i in range(abs(amount)):
self.command(c.LCD_CURSORSHIFT | c.LCD_DISPLAYMOVE | direction)
c.usleep(50)
def create_char(self, location, bitmap):
"""Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley)
"""
assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'
# Store previous position
pos = self.cursor_pos
# Write character to CGRAM
self.command(c.LCD_SETCGRAMADDR | location << 3)
for row in bitmap:
self._send_data(row)
# Restore cursor pos
self.cursor_pos = pos
# Mid level commands
def command(self, value):
"""Send a raw command to the LCD."""
self._send_instruction(value)
def write(self, value): # type: (int) -> None
"""Write a raw byte to the LCD."""
# Get current position
row, col = self._cursor_pos
# Write byte if changed
try:
if self._content[row][col] != value:
self._send_data(value)
self._content[row][col] = value # Update content cache
unchanged = False
else:
unchanged = True
except IndexError as e:
# Position out of range
if self.auto_linebreaks is True:
raise e
self._send_data(value)
unchanged = False
# Update cursor position.
if self.text_align_mode == 'left':
if self.auto_linebreaks is False or col < self.lcd.cols - 1:
# No newline, update internal pointer
newpos = (row, col + 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, 0)
else:
self.cursor_pos = (0, 0)
self.recent_auto_linebreak = True
else:
if self.auto_linebreaks is False or col > 0:
# No newline, update internal pointer
newpos = (row, col - 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, self.lcd.cols - 1)
else:
self.cursor_pos = (0, self.lcd.cols - 1)
self.recent_auto_linebreak = True
def cr(self): # type: () -> None
"""Write a carriage return (``\\r``) character to the LCD."""
self.write_string('\r')
def lf(self): # type: () -> None
"""Write a line feed (``\\n``) character to the LCD."""
self.write_string('\n')
def crlf(self): # type: () -> None
"""Write a line feed and a carriage return (``\\r\\n``) character to the LCD."""
self.write_string('\r\n')
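# Usage sketch (assumption: a concrete subclass such as RPLCD's CharLCD provides the
# actual bus wiring via _init_connection/_send_data/_send_instruction):
#
#   lcd = CharLCD(cols=20, rows=4)       # hypothetical constructor call
#   lcd.write_string(u'Hello\r\nWorld')  # unicode text, encoded via the selected charmap
#   lcd.cursor_pos = (1, 0)              # move to second row, first column
#   lcd.close(clear=True)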
| mit | 8,369,749,654,234,365,000 | 35.645089 | 91 | 0.566303 | false |
r3dact3d/tweeter | tweepy/chucknorrisTwitBot.py | 1 | 1485 | #!/usr/bin/env python
# written by r3dact3d (brady)
import requests
import tweepy
from random import choice
from config import *
'''
Chucknorris.io is free and will always be! However, as maintaining this service costs $$$,
we are glad to be sponsored by Jugendstil.io.
'''
# Available Catagories - I did this way so specific catagories could be removed if you want... but chuck would not approve.
chuckagories = ["explicit", "dev", "movie", "food", "celebrity", "science", "sport", "political", "religion", "animal", "history", "music", "travel", "career", "money", "fashion"]
chuckagory = choice(chuckagories)
url = 'https://api.chucknorris.io/jokes/random'
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
def chuck(url, params):
myParams = {
	    'category' : params,  # the /jokes/random endpoint filters by the 'category' query parameter
}
page = requests.get(url, params=myParams)
if page.status_code == 200:
output = page.json()
chuckSays = output['value']
payLoad = '#chucknorris "%s"' % (chuckSays[:125])
tweet(payLoad)
else:
print('Something went wrong with the API, someone is in big trouble if Chuck finds out!')
exit()
chuck(url, chuckagory)
| unlicense | 7,604,012,989,098,353,000 | 29.9375 | 179 | 0.657912 | false |
python-hospital/hospital | hospital/loading.py | 1 | 4113 | # -*- coding: utf-8 -*-
"""Utilities to discover and load health checks."""
import importlib
import inspect
import pkgutil
import unittest
from hospital.core import is_healthcheck
def is_package(module):
"""Return True if module object is a package.
>>> import hospital
>>> is_package(hospital)
True
>>> import hospital.api
>>> is_package(hospital.api)
False
"""
return inspect.getmodulename(module.__file__) == '__init__'
class HealthCheckLoader(unittest.TestLoader):
"""Encapsulate HealthCheck loading.
This is a special TestLoader which makes sure instances are actually
health checks.
"""
def is_healthcheck(self, value):
"""Return True if ``value`` is an health check.
Default implementation uses :func:`hospital.core.is_healthcheck`.
"""
return is_healthcheck(value)
def filter_suite(self, suite):
"""Return copy of TestSuite where only health checks remain."""
if isinstance(suite, unittest.TestSuite):
suite_copy = self.suiteClass()
for sub in suite:
if isinstance(sub, unittest.TestSuite):
suite_copy.addTest(self.filter_suite(sub))
else:
if self.is_healthcheck(sub):
suite_copy.addTest(sub)
elif self.is_healthcheck(suite):
suite_copy = suite.copy()
return suite_copy
def loadTestsFromTestCase(self, testCaseClass):
"""Load healthchecks from TestCase.
Combines :meth:`unittest.TestLoader.loadTestsFromTestCase` and
:meth:`filter_suite`.
"""
suite = super(HealthCheckLoader, self).loadTestsFromTestCase(
testCaseClass)
return self.filter_suite(suite)
def loadTestsFromModule(self, module, *args, **kwargs):
"""Load healthchecks from module.
Combines :meth:`unittest.TestLoader.loadTestsFromModule` and
:meth:`filter_suite`.
"""
suite = super(HealthCheckLoader, self).loadTestsFromModule(
module, *args, **kwargs)
return self.filter_suite(suite)
def loadTestsFromName(self, name, module=None):
"""Load healthchecks from name.
Combines :meth:`unittest.TestLoader.loadTestsFromName` and
:meth:`filter_suite`.
"""
suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)
return self.filter_suite(suite)
def loadTestsFromNames(self, names, module=None):
"""Load healthchecks from names.
Combines :meth:`unittest.TestLoader.loadTestsFromNames` and
:meth:`filter_suite`.
"""
suite = super(HealthCheckLoader, self).loadTestsFromNames(names,
module)
return self.filter_suite(suite)
def loadTestsFromPackage(self, package):
"""Discover and load tests from modules in package."""
tests = []
packages = pkgutil.walk_packages(package.__path__)
for (loader, module_name, is_pkg) in packages:
full_name = '{}.{}'.format(package.__name__, module_name)
tests.append(self.loadTestsFromName(full_name))
if is_pkg:
sub_package = importlib.import_module(full_name)
tests.append(self.loadTestsFromPackage(sub_package))
suite = self.suiteClass(tests)
return self.filter_suite(suite)
def discover(self, start_dir, pattern='*', top_level_dir=None):
"""Discover healthchecks in either a package, module or directory."""
try:
module = importlib.import_module(start_dir)
except ImportError:
# Maybe a filename.
return super(HealthCheckLoader, self).discover(
start_dir=start_dir,
pattern=pattern,
top_level_dir=top_level_dir)
else:
if is_package(module):
return self.loadTestsFromPackage(module)
else:
return self.loadTestsFromModule(module)
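# Usage sketch (hypothetical package name 'myproject.healthchecks'):
#
#   loader = HealthCheckLoader()
#   suite = loader.discover('myproject.healthchecks')
#   unittest.TextTestRunner().run(suite)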
| bsd-3-clause | -5,865,171,719,997,008,000 | 32.169355 | 78 | 0.607343 | false |
olivierberten/KhmerConverter | modules/legacyConvertOdt.py | 1 | 10850 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Khmer Unicode to Legacy fonts Conversion
# Copyright(c) 2006-2008 Khmer Software Initiative
# www.khmeros.info
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# See the LICENSE file for more details.
#
# Developed by:
# Hok Kakada ([email protected])
# Keo Sophon ([email protected])
# San Titvirak ([email protected])
# Seth Chanratha ([email protected])
#
# This module convertes an *.odt file from Unicode to legacy Khmer format
from xml.dom import minidom
from FontDataXML import FontData
import legacyReorder
import legacyConverter
import unittest
import zipfile
from zlib import DEFLATED
SP = unichr(0x20)
ZWSP = unichr(0x200B)
ZWNJ = unichr(0x200C)
ZWJ = unichr(0x200D)
INKHMER = SP + ZWSP + ZWNJ + ZWJ
##STARTKHMER = u"«»" + ZWNJ + ZWSP + ZWJ
STARTKHMER = ZWNJ + ZWSP + ZWJ
MINUNIC = 0x1780
MAXUNIC = 0x17FF
KHMERSTYLE = 'kc-1.0-kstyle'
class legacyConvertOdt:
def __init__(self):
self.CONTENTXML = 'content.xml'
self.STYLESXML = 'styles.xml'
self.fd = FontData()
self.outputFont = "ABC-TEXT-05"
self.outputFontSize = None
self.data = self.fd.unicodeData(self.outputFont)
def convertOdtFile(self, inputFileName, outputFileName, outputFont, outputFontSize = None):
"""This function converts OpenOffice.org Writer file.
inputFileName : name of input file to convert
outputFileName : name of output file. Default value is converted-inputFileName.
outputFont : legacy output font name. Default depends on the font type.
outputFontSize : force the font size the output file will use. value = None to ignore.
"""
if (not self.fd.isConvertable(outputFont)):
raise TypeError('unknown output font ' + outputFont + '!')
if (inputFileName == outputFileName):
raise TypeError('input file and output file must be different!')
try:
# read zip file (.odt)
zipIn = zipfile.ZipFile(inputFileName, "r")
except IOError:
raise IOError('Cannot open file "' + inputFileName + '" for reading!')
        if (self.CONTENTXML not in zipIn.namelist()) or (self.STYLESXML not in zipIn.namelist()):
raise TypeError('Input file' + inputFileName + 'is not an odt file!')
try:
# create new zip file (.odt)
zipOut = zipfile.ZipFile(outputFileName, "w", DEFLATED)
except IOError:
raise IOError('Cannot open file "' + outputFileName + '" for writing!')
# get data for the font
self.outputFont = self.fd.defaultFont(outputFont)
self.data = self.fd.unicodeData(self.outputFont)
if (outputFontSize):
self.outputFontSize = str(outputFontSize) + 'pt'
for file in zipIn.namelist():
fdata = zipIn.read(file)
# do the converting for content.xml only
if (file == self.CONTENTXML):
fdata = self.processContent(fdata)
# TODO: do we need to test the type? When do we not want to encode in UTF-8 ?
if (type(fdata) == unicode):
fdata = fdata.encode('utf-8')
elif (file == self.STYLESXML):
fdata = self.processStyle(fdata)
# TODO: do we need to test the type? When do we not want to encode in UTF-8 ?
if (type(fdata) == unicode):
fdata = fdata.encode('utf-8')
zipOut.writestr(file, fdata)
zipOut.close()
zipIn.close()
def processContent(self, xmlData):
"""
input: xml data in unicode string
return: xml data string in legacy encoding where text is converted
"""
self.xmldoc = minidom.parseString(xmlData)
officeNode = self.xmldoc.getElementsByTagName('office:text')
officeAutoStylesNode = self.xmldoc.getElementsByTagName('office:automatic-styles')[0]
officeFontFaceDecls = self.xmldoc.getElementsByTagName('office:font-face-decls')[0]
# add font information
self.addFontInfo(officeAutoStylesNode, officeFontFaceDecls)
# go through office node and convert to legacy.
self.goThru(officeNode, self.convertIfUnicode)
return self.xmldoc.toxml()
def processStyle(self, xmldata):
"""change font name and size, convert data to legacy in xmldata
@param xmldata: xml string to parse."""
self.xmldoc = minidom.parseString(xmldata)
officeAutoStylesNode = self.xmldoc.getElementsByTagName('office:automatic-styles')[0]
officeFontFaceDecls = self.xmldoc.getElementsByTagName('office:font-face-decls')[0]
officeMasterStylesNode = self.xmldoc.getElementsByTagName('office:master-styles')
# go through node, replace font, and convert data to legacy.
self.addFontInfo(officeAutoStylesNode, officeFontFaceDecls)
self.goThru(officeMasterStylesNode, self.convertIfUnicode)
return self.xmldoc.toxml('utf-8')
def goThru (self, nodelist, function):
"""go through nodelist and call function with child node as argument.
@param nodelist: dom's node list.
@param function: function to call, child argument will be provided by goThru."""
for node in nodelist:
if node.hasChildNodes():
for child in node.childNodes:
function(child)
self.goThru (node.childNodes, function)
def addFontInfo(self, autoStyleNode, declsNode):
"""add "style:style" to node."""
# add font declaration
styleFontFaceNode = self.xmldoc.createElement('style:font-face')
styleFontFaceNode.setAttribute('style:name', self.outputFont)
styleFontFaceNode.setAttribute('svg:font-family', self.outputFont)
declsNode.appendChild(styleFontFaceNode)
# add font style
styleNode = self.xmldoc.createElement('style:style')
styleNode.setAttribute('style:family', 'text')
styleNode.setAttribute('style:name', KHMERSTYLE)
styleTextPropNode = self.xmldoc.createElement('style:text-properties')
styleTextPropNode.setAttribute('style:font-name', self.outputFont)
if (self.outputFontSize):
styleTextPropNode.setAttribute('fo:font-size', self.outputFontSize)
styleNode.appendChild(styleTextPropNode)
autoStyleNode.appendChild(styleNode)
def convertIfUnicode(self, node):
"""
take Khmer Unicode data out of current node, convert it and put
it in a new node which mark as khmerConverter_DefaultStyle.
"""
if not node.nodeValue:
return node
sin = node.data
newNode = self.xmldoc.createDocumentFragment()
cursor = 0
charCount = len(sin)
while (cursor < charCount):
khmStr = u''
othStr = u''
while (cursor < charCount):
val = ord(sin[cursor])
# in khmer range
if ((val >= MINUNIC) and (val <= MAXUNIC)) or (STARTKHMER.find(unichr(val)) != -1) or (len(khmStr) > 0 and INKHMER.find(unichr(val)) != -1):
if (othStr):
break
khmStr += sin[cursor]
# in other range
else:
if (khmStr):
break
othStr += sin[cursor]
cursor += 1
# end of while (khmer string or other string found)
if (khmStr):
# convert khmer text
khmStr = legacyReorder.reorder(khmStr)
khmStr = legacyConverter.converter(khmStr, self.data)
khmStr = khmStr.decode('cp1252')
# add new khmer node
khmNode = self.xmldoc.createElement('text:span')
khmNode.setAttribute('text:style-name', KHMERSTYLE)
# add data
txtNode = self.xmldoc.createTextNode(khmStr)
khmNode.appendChild(txtNode)
newNode.appendChild(khmNode)
elif (othStr):
txtNode = self.xmldoc.createTextNode(othStr)
newNode.appendChild(txtNode)
node.parentNode.replaceChild(newNode, node)
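# Minimal usage sketch (file names are hypothetical; the output font must be one
# FontDataXML.FontData can convert): convertOdtFile() rewrites content.xml and
# styles.xml inside the .odt zip and wraps each converted Khmer run in a
# text:span that uses the kc-1.0-kstyle character style added by addFontInfo().
#
#   converter = legacyConvertOdt()
#   converter.convertOdtFile('report_unicode.odt', 'report_legacy.odt',
#                            outputFont='ABC-TEXT-05', outputFontSize=12)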
class TestConvertOdt(unittest.TestCase):
def testSameFile(self):
# same file raise error
self.assertRaises(TypeError, legacyConvertOdt().convertOdtFile, 'file1', 'file1', 'abc')
def testWrongFont(self):
# same file raise error
self.assertRaises(TypeError, legacyConvertOdt().convertOdtFile, 'file1', 'file2', 'fontTHATdoesNOTexist')
def testOpenUnavailableFile(self):
# raise error when file is unavailable
self.assertRaises(IOError, legacyConvertOdt().convertOdtFile, 'AfileTHATdoesNOTexist', 'file1', 'abc')
def testProcessContent(self):
header = u"<?xml version=\"1.0\" ?><office:document-content xmlns:office=\"urn:oasis:names:tc:opendocument:xmlns:office:1.0\" xmlns:text=\"urn:oasis:names:tc:opendocument:xmlns:text:1.0\">"
fontDeclOpen = u"<office:font-face-decls>"
fontDeclClose = u"</office:font-face-decls>"
autoStyleOpen = u"<office:automatic-styles>"
autoStyleClose = u"</office:automatic-styles>"
contentOpen = u"<office:body><office:text><text:p text:style-name=\"Standard\">"
contentClose = u"</text:p></office:text></office:body></office:document-content>"
myXml = header + \
fontDeclOpen + fontDeclClose + \
autoStyleOpen + autoStyleClose + \
contentOpen + \
"កខគabcច ឆ ជxyz" + \
contentClose
convertedXml = header + \
fontDeclOpen + \
u"<style:font-face style:name=\"ABC-TEXT-05\" svg:font-family=\"ABC-TEXT-05\"/>" + \
fontDeclClose + \
autoStyleOpen + \
"<style:style style:family=\"text\" style:name=\"" + KHMERSTYLE + "\"><style:text-properties style:font-name=\"ABC-TEXT-05\"/></style:style>" + \
autoStyleClose + \
contentOpen + \
"<text:span text:style-name=\"" + KHMERSTYLE + "\">kxK</text:span>abc<text:span text:style-name=\"" + KHMERSTYLE + "\">c q C</text:span>xyz" + \
contentClose
self.assertEqual(legacyConvertOdt().processContent(myXml.encode('utf-8')), convertedXml)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -5,767,417,457,021,511,000 | 42.693548 | 197 | 0.614249 | false |
Dikovinka/HRCS | API/permissions.py | 1 | 2558 | from rest_framework import permissions
from API.models import *
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
class OnlyPMorQALeadCanEdit(permissions.BasePermission):
"""
    Custom permission that only allows a project's PM or QA Lead to edit an object.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
leads = ProjectTeam.objects.filter(project=project, team_role__in=['PM', 'QALEAD'])
team = ProjectTeam.objects.filter(project=project)
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS and request.user in [member.user for member in team]:
return True
# Write permissions are only allowed to the qa lead or PM
if request.user in [member.user for member in leads]:
return True
# Superuser has full access to all endpoints
return request.user and request.user.is_staff
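# Example wiring (a sketch; ProjectViewSet and ProjectSerializer are hypothetical
# names, and the usual rest_framework viewsets import is assumed). DRF calls
# has_object_permission() for each object-level check, so ordinary team members
# get read-only access while PM/QALEAD members and staff may modify the object.
#
#   class ProjectViewSet(viewsets.ModelViewSet):
#       queryset = Project.objects.all()
#       serializer_class = ProjectSerializer
#       permission_classes = (OnlyPMorQALeadCanEdit,)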
class IsProjectTeamOnly(permissions.BasePermission):
"""
    Custom permission to only allow members of the project team to access an object.
"""
def has_object_permission(self, request, view, obj):
if isinstance(obj, Project):
project = obj
elif isinstance(obj, (ProjectTeam, Issue)):
project = obj.project
elif isinstance(obj, (Worklog, IssueAttachment, IssueLink, Comment)):
project = obj.issue.project
else:
return False
team = ProjectTeam.objects.filter(project=project)
# Write permissions are only allowed to the project team
if request.user in [member.user for member in team]:
return True
# Superuser has full access to all endpoints
        return request.user and request.user.is_staff | apache-2.0 | -8,638,617,535,118,737,000 | 37.772727 | 107 | 0.654808 | false |
gogoair/foremast | tests/app/test_create_app.py | 1 | 1377 | """Verifies that instance_links are being retrieved properly from LINKS. Verifies that app_data.json.j2
contains the instance link information"""
from unittest import mock
from foremast.app import SpinnakerApp
@mock.patch('foremast.app.spinnaker_app.LINKS', new={'example1': 'https://example1.com'})
def test_default_instance_links():
"""Validate default instance_links are being populated properly."""
pipeline_config = {
"instance_links": {
"example2": "https://example2.com",
}
}
combined = {'example1': 'https://example1.com'}
combined.update(pipeline_config['instance_links'])
spinnaker_app = SpinnakerApp("aws", pipeline_config=pipeline_config)
instance_links = spinnaker_app.retrieve_instance_links()
assert instance_links == combined, "Instance Links are not being retrieved properly"
@mock.patch('foremast.app.spinnaker_app.LINKS', new={'example': 'example1', 'example': 'example2'})
def test_duplicate_instance_links():
"""Validate behavior when two keys are identical."""
pipeline_config = {
"instance_links": {}
}
duplicate = {'example': 'example2'}
spinnaker_app = SpinnakerApp("aws", pipeline_config=pipeline_config)
instance_links = spinnaker_app.retrieve_instance_links()
    assert instance_links == duplicate, "Instance links handling duplicates are wrong."
| apache-2.0 | -1,537,231,771,002,252,500 | 36.216216 | 103 | 0.701525 | false |
IQSS/miniverse | dv_apps/metrics/stats_view_base.py | 1 | 9234 | import json
import csv
from collections import OrderedDict
from datetime import datetime
from StringIO import StringIO
#import pandas as pd
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.views.generic import View
# Apply API Key and page caching to API endpoints
#
from django.views.decorators.cache import cache_page
from django.utils.decorators import method_decorator
from dv_apps.dataverse_auth.decorator import apikey_required
from dv_apps.utils.metrics_cache_time import get_metrics_api_cache_time
from dv_apps.utils.date_helper import get_timestamp_for_filename
from dv_apps.metrics.stats_util_base import StatsMakerBase
def send_cors_response(response):
"""Quick hack to allow CORS...."""
response["Access-Control-Allow-Origin"] = "*"
#response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
#response["Access-Control-Max-Age"] = "1000"
#response["Access-Control-Allow-Headers"] = "*"
return response
#@method_decorator(apikey_required, name='get')
@method_decorator(cache_page(get_metrics_api_cache_time()), name='get')
class StatsViewSwagger(View):
"""Used to help build the swagger docs"""
# params getting a bit out of control, need to make this more manageable
BASIC_DATE_PARAMS = ['startDateParam', 'endDateParam', 'selectedYearParam']
PARAM_DV_API_KEY = ['dataverseAPIKey']
PARAM_SELECTED_DV_ALIASES = ['selectedDataverseAliases']
PARAM_INCLUDE_CHILD_DVS = ['includeChildDataverses']
PARAM_AS_CSV = ['asCSV', 'asExcel']
PARAM_BIN_SIZE = ['binSize'] # bin_size
PARAM_BIN_SIZE_BYTES = ['binSizeBytes'] # bin_size
PARAM_NUM_BINS = ['numBins'] # num_bins
PARAM_SKIP_EMPTY_BINS = ['skipEmptyBins'] # skip_empty_bins
PARAM_DVOBJECT_ID = ['dataverseObjectId']
PARAM_DATASET_ID = ['datasetId']
PARAM_DATASET_PERSISTENT_ID = ['persistentId']
PARAM_DATAVERSE_ALIAS = ['dataverseAlias']
PUBLISH_PARAMS = ['publicationStateParam']
PUB_STATE_PUBLISHED = 'published'
PUB_STATE_UNPUBLISHED = 'unpublished'
PUB_STATE_ALL = 'all'
PRETTY_JSON_PARAM = ['prettyJSONParam']
DV_TYPE_UNCATEGORIZED_PARAM = ['showUncategorizedParam']
FILE_CONTENT_TYPE_PARAM = ['contentTypeParam']
RESULT_NAME_MONTH_COUNTS = 'MonthCounts'
RESULT_NAME_FILE_EXT_COUNTS = 'FileExtensionCounts'
RESULT_NAME_FILE_TYPE_COUNTS = 'FileTypeCounts'
RESULT_NAME_TOTAL_COUNT = 'TotalCount'
RESULT_NAME_NUM_UNIQUE_EXT = 'NumberUniqueExtensions'
RESULT_NAME_AFFILIATION_COUNTS = 'AffiliationCounts'
RESULT_NAME_DATASET = 'Dataset'
RESULT_NAME_DATAVERSE = 'Dataverse'
RESULT_NAME_DATAVERSE_TYPE_COUNTS = 'DataverseTypeCount'
RESULT_NAME_DATASET_SUBJECT_COUNTS = 'DatasetSubjectCounts'
RESULT_NAME_BIN_COUNTS = 'BinCounts'
RESULT_NAME_BIN_COUNTS_SIZES = 'BinCountsSizes'
TAG_METRICS = 'metrics'
TAG_DATAVERSES = 'metrics - dataverses'
TAG_DATASETS = 'metrics - datasets'
TAG_DATAFILES = 'metrics - files'
TAG_TEST_API = 'dataverse/dataset JSON - (unofficial, only published data)'
# ---------------------------------------------
# For holding errors found at the SwaggerView level
# - e.g. bad url params not caught at a lower level
# ---------------------------------------------
error_found = False
error_message = None
# ---------------------------------------------
# Swagger attributes to be defined for each subclass
# ---------------------------------------------
api_path = '/path/to/endpoint'
summary = 'add summary'
description = 'add description'
description_200 = 'description for the HTTP 200 response'
param_names = PARAM_DV_API_KEY + BASIC_DATE_PARAMS + PUBLISH_PARAMS + PRETTY_JSON_PARAM
result_name = RESULT_NAME_MONTH_COUNTS
tags = [TAG_METRICS]
# ---------------------------------------------
def get_swagger_spec(self):
"""Return a YAML representation of the swagger spec for this endpoint"""
d = {}
d['api_path'] = self.api_path
d['summary'] = self.summary
d['description'] = self.description
d['description_200'] = self.description_200
d['param_names'] = self.param_names
d['result_name'] = self.result_name
d['tags'] = self.tags
return render_to_string('metrics/swagger_spec/single_endpoint.yaml', d)
def get_content_type_param(self, request):
"""Return the result of the "?unpublished" query string param"""
ctype = request.GET.get('ctype', None) # add this as variable..
if ctype is not None and len(ctype) > 0:
return ctype
return None
def get_pub_state(self, request):
"""Return the result of the "?pub_state" query string param
Default value is: "published"
Other choices: "unpublished", "all"
When checking, use:
- PUB_STATE_PUBLISHED
- PUB_STATE_UNPUBLISHED
- PUB_STATE_ALL
"""
return request.GET.get('pub_state', self.PUB_STATE_PUBLISHED)
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic.
Overwrite this method for each subclass
"""
raise Exception("This method must return a stats_result.StatsResult object")
def get(self, request, *args, **kwargs):
"""Return a basic get request using the StatsResult object"""
# Get the StatsResult -- different for each subclass
stats_result = self.get_stats_result(request)
if stats_result is None:
err_dict = dict(status="ERROR",\
message="Unknown processing error")
return send_cors_response(JsonResponse(err_dict, status=500))
# Was there an error? If so, return the error message
#
if stats_result.has_error():
err_dict = dict(status="ERROR",
message=stats_result.error_message)
if stats_result.bad_http_status_code:
status_code = stats_result.bad_http_status_code
else:
status_code = 400
return send_cors_response(JsonResponse(err_dict, status=status_code))
# Create the dict for the response
#
resp_dict = OrderedDict()
# status is "OK"
resp_dict['status'] = "OK"
        # If we're in debug and the SQL query is available,
# send it in
if settings.DEBUG and stats_result.sql_query:
resp_dict['debug'] = dict(sql_query=stats_result.sql_query)
# Set a timestamp and params
resp_dict['info'] = OrderedDict()
resp_dict['info']['generation_time'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
if get_metrics_api_cache_time() > 0:
resp_dict['info']['cache_time_seconds'] = get_metrics_api_cache_time()
resp_dict['info']['params'] = request.GET
# Set the actual stats data
resp_dict['data'] = stats_result.result_data
# Is there a request to send the JSON formatted within HTML tags?
if 'pretty' in request.GET:
return HttpResponse('<pre>%s</pre>' % json.dumps(resp_dict, indent=4))
if StatsMakerBase.is_param_value_true(request.GET.get('as_csv', None)):
return self.get_data_as_csv_response(request, stats_result)
if StatsMakerBase.is_param_value_true(request.GET.get('as_excel', None)):
return self.get_data_as_excel_response(request, stats_result)
# Return the actual response
return send_cors_response(JsonResponse(resp_dict))
def get_data_as_csv_response(self, request, stats_result):
"""Hasty method, proof of concept for downloads by month"""
if stats_result is None or stats_result.result_data is None:
return None
csv_content = stats_result.get_csv_content()
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(csv_content, content_type='text/csv')
csv_fname = 'metrics_%s.csv' % get_timestamp_for_filename()
response['Content-Disposition'] = 'attachment; filename="%s"' % csv_fname
return response
def get_data_as_excel_response(self, request, stats_result):
"""
http://stackoverflow.com/questions/35267585/django-pandas-to-http-response-download-file
"""
if stats_result is None or stats_result.result_data is None:
return None
excel_workbook = stats_result.get_excel_workbook()
if excel_workbook is None:
# Ah, make a better error
return HttpResponse('Sorry! An error occurred trying to create an Excel spreadsheet.')
xlsx_fname = 'metrics_%s.xlsx' % get_timestamp_for_filename()
response = HttpResponse(excel_workbook,\
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % xlsx_fname
return response
@method_decorator(apikey_required, name='get')
class StatsViewSwaggerKeyRequired(StatsViewSwagger):
pass
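# Sketch of a concrete endpoint (StatsMakerDatasets and its method name are
# illustrative stand-ins for one of the stats_util_* helpers, not guaranteed API):
# a subclass supplies the swagger attributes and overrides get_stats_result() to
# return a StatsResult; the inherited get() then wraps it in the shared
# JSON/CSV/Excel envelope.
#
#   class DatasetCountByMonthView(StatsViewSwaggerKeyRequired):
#       api_path = '/datasets/count/monthly'
#       summary = 'Number of datasets added each month'
#       result_name = StatsViewSwagger.RESULT_NAME_MONTH_COUNTS
#       tags = [StatsViewSwagger.TAG_DATASETS]
#
#       def get_stats_result(self, request):
#           stats_maker = StatsMakerDatasets(**request.GET.dict())
#           return stats_maker.get_dataset_counts_by_create_date()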
| mit | 2,642,223,270,353,928,000 | 36.084337 | 98 | 0.640676 | false |
ydkhatri/mac_apt | plugins/quicklook.py | 1 | 22345 | '''
Copyright (c) 2019 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
from plugins.helpers.common import *
import sqlite3
import logging
import os
from itertools import chain, zip_longest
from PIL import Image
__Plugin_Name = "QUICKLOOK" # Cannot have spaces, and must be all caps!
__Plugin_Friendly_Name = "QuickLook Thumbnail Cache"
__Plugin_Version = "1.0"
__Plugin_Description = "Parses QuickLook Thumbnail Cache data"
__Plugin_Author = "Jack Farley - BlackStone Discovery"
__Plugin_Author_Email = "[email protected] - [email protected]"
__Plugin_Modes = "MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Provide QuickLook database folder, found at:' \
'/private/var/folders/XX/XXXXXXXXXXXXXXXXXXX_XXXXXXXXX/' \
                              'C/com.apple.QuickLook.thumbnailcache/index.sqlite'
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
"""
This plugin was made using previously conducted research and scripting from Mari Degrazia and Dave:
https://github.com/mdegrazia/OSX-QuickLook-Parser
http://www.easymetadata.com/2015/01/sqlite-analysing-the-quicklook-database-in-macos/
"""
running_on_windows = (os.name == 'nt')
class QuickLook:
def __init__(self, folder, file_name, hit_count, last_hit_date, version, bitmapdata_location,
bitmapdata_length, width, height, fs_id, inode, row_id, source):
self.folder = folder
self.file_name= file_name
self.hit_count = hit_count
self.last_hit_date = last_hit_date
self.version = version
self.bitmapdata_location = bitmapdata_location
self.bitmapdata_length = bitmapdata_length
self.width = width
self.height = height
self.fs_id = fs_id
self.inode = inode
self.row_id = row_id
self.source = source
def PrintAll(quicklook_data, output_params, source_path):
quicklook_info = [ ('Folder',DataType.TEXT),('File_Name',DataType.TEXT),('Hit_Count',DataType.TEXT),
('Last_Hit_Date',DataType.TEXT), ('version',DataType.BLOB), ('bitmap_data_location',DataType.INTEGER),
('bitmap_data_length',DataType.INTEGER), ('Width',DataType.INTEGER), ('Height',DataType.INTEGER),
('fs_id',DataType.TEXT),('inode',DataType.TEXT), ('row_id',DataType.TEXT), ('Source',DataType.TEXT)
]
quicklook_list = []
for ql in quicklook_data:
ql_items = [ql.folder, ql.file_name, ql.hit_count,
ql.last_hit_date, ql.version, ql.bitmapdata_location, ql.bitmapdata_length, ql.width,
ql.height, ql.fs_id, ql.inode, ql.row_id, ql.source
]
quicklook_list.append(ql_items)
WriteList("QuickLook Info", "QuickLook", quicklook_list, quicklook_info, output_params, source_path)
def OpenDbFromImage(mac_info, inputPath):
'''Returns tuple of (connection, wrapper_obj)'''
try:
sqlite = SqliteWrapper(mac_info)
conn = sqlite.connect(inputPath)
if conn:
log.debug ("Opened database successfully")
return conn, sqlite
except sqlite3.Error as ex:
log.exception ("Failed to open database, is it a valid QuickLook DB?")
return None, None
def OpenDb(inputPath):
log.info ("Processing file " + inputPath)
try:
conn = CommonFunctions.open_sqlite_db_readonly(inputPath)
log.debug ("Opened database successfully")
return conn
except sqlite3.Error:
log.exception ("Failed to open database, is it a valid Screen Time DB?")
return None
def openSingle(path):
"""
:param path: Path of file to open, use in standalone mode
:return: handle to file
"""
handle = open(path, "rb")
return handle
def openDeadbox(path, mac_info):
"""
:param path: Path in image to file
:param mac_info: mac_info object
:return: handle to file
"""
handle = mac_info.Open(path)
return handle
def carveThumb(offset, length, thumbfile, thumbname, width, height, export, user_name, is_BGRA=False):
"""
:param offset: Offset in thumbnails.data for thumbnail
:param length: Lenght of data to carve for thumbnail in thumbnails.data
:param thumbfile: Source thumbnails.data file to carve from
:param thumbname: Name of the file that has the thumbnail
:param export: Either output directory in single plugin mode or mac_info object
:return: Nothing
"""
global running_on_windows
if length is not None:
# Seek and read thumbnails.data from offsets and lengths found in the index.sqlite
thumbfile.seek(offset)
thumb = thumbfile.read(length)
if is_BGRA:
thumb = convertBGRA_to_RGBA(thumb)
# Use the Pillow Library Image to parse and export files as images
imgSize = (width, height)
img = Image.frombytes('RGBA', imgSize, thumb, decoder_name='raw')
# Parse via mac_info
if type(export) is not str:
export_folder = os.path.join(export.output_params.export_path, __Plugin_Name, "Thumbnails", user_name)
# Parse via single plugin
else:
export_folder = os.path.join(export, __Plugin_Name, "Thumbnails")
# Create output directory if doesn't exist
if not os.path.exists(export_folder):
os.makedirs(export_folder)
thumbname = CommonFunctions.SanitizeName(thumbname) # remove illegal characters which might cause issues!
# Set up output file with png extension attached
try:
# Some of the names may have illegal characters in them, filter those out
thumbname = CommonFunctions.SanitizeName(thumbname) + " - " + str(width) + "x" + str(height) + ".png"
export_file = os.path.join(export_folder, thumbname)
export_file = CommonFunctions.GetNextAvailableFileName(export_file)
log.debug("Attempting to copy out thumbnail to file: " + export_file)
# fix for very long thumbnail names
if running_on_windows and len(export_file) > 260 and export_file[1:3]==':\\':
export_file = '\\\\?\\' + export_file
img.save(export_file)
except (ValueError, OSError) as ex:
log.exception('Failed to write out thumbnail ' + thumbname)
def parseDb(c, quicklook_array, source, path_to_thumbnails, export, user_name):
"""
:param c: Connection to index.sqlite
:param quicklook_array: Empty quicklook array to store QuickLook objects
:param source: The source file being used, the full path to the index.sqlite
:return: Nothing, fills the quicklook array
"""
# Query only gets the largest render for a file, ignoring smaller thumbnails
# TODO - Identify deleted files based on null thumbnails with non-existing inode numbers
thumbnail_query = """
select files.rowid , folder, file_name, fs_id, version, max(size) as size, hit_count,
datetime(last_hit_date + strftime('%s', '2001-01-01 00:00:00'), 'unixepoch') as last_hit_date,
width, (bytesperrow / (bitsperpixel/bitspercomponent)) as computed_width, height,
bitmapdata_location, bitmapdata_length
from files left join thumbnails on files.ROWID = thumbnails.file_id
where size is not null
group by files.ROWID
"""
try:
c.execute(thumbnail_query)
data = c.fetchall()
thumbfile = None
if len(data):
# Export the thumbnails.data file via mac_info
if type(export) is not str:
thumbfile = openDeadbox(path_to_thumbnails, export)
# Export thumbnails.data via single plugin
else:
thumbfile = openSingle(path_to_thumbnails)
# Iterate through the rows returned by the above SQL statment and create QuickLook object based off it,
# then appends to array
for item in data:
row_id = item[0]
folder = item[1]
file_name = item[2]
hit_count = item[6]
last_hit_date = item[7]
bitmapdata_location = item[11]
bitmapdata_length = item[12]
width = item[8]
computed_width = item[9]
height = item[10]
fs_id = item[3]
if fs_id and len(fs_id) > 10:
try:
inode = fs_id[10:].split('.')[1]
inode = inode.rstrip('/')
except IndexError as ex:
inode = ''
else:
inode = ''
version = item[4] #plist
ql = QuickLook(folder, file_name, hit_count, last_hit_date, version, bitmapdata_location,
bitmapdata_length, computed_width, height, fs_id, inode, row_id, source)
quicklook_array.append(ql)
# Carve out thumbnail
carveThumb(bitmapdata_location, bitmapdata_length, thumbfile, file_name, computed_width, height, export, user_name)
if thumbfile:
thumbfile.close()
# Catch SQLite3 exceptions
except sqlite3.Error as e:
log.exception("Exception while executing query for QuickLook cache. Exception was: " + str(e))
def findParents(c, CNID, full_path):
inode_query_unformat = """
SELECT Parent_CNID from Combined_Inodes where Combined_Inodes.CNID == {}
"""
inode_query = inode_query_unformat.format(CNID)
name_query_unformat = """
SELECT Name from Combined_Inodes where Combined_Inodes.CNID == {}
"""
if CNID == 2:
return
else:
c.execute(inode_query)
parent_CNID = c.fetchone()[0]
name_query = name_query_unformat.format(parent_CNID)
c.execute(name_query)
parent_folder = c.fetchone()[0]
full_path[0] = parent_folder + "/" + full_path[0]
findParents(c, parent_CNID, full_path)
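# Illustration (a sketch): the caller seeds a one-element list and lets the
# recursion fill it in:
#
#   full_path = [""]
#   findParents(apfs_c, inode, full_path)
#
# Each call looks up the parent CNID of the current node, prepends that parent
# folder's name to full_path[0], and recurses until the volume root (CNID 2) is
# reached, producing the folder path used for the thumbnail's source file.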
def parseDbNewSinglePlug(c, quicklook_array, source, path_to_thumbnails, export):
"""
:param c: Connection to index.sqlite
:param quicklook_array: Empty quicklook array to store QuickLook objects
:param source: The source file being used, the full path to the index.sqlite
:return: Nothing, fills the quicklook array
"""
combined_query = """
SELECT fileId, version, MAX(size), hit_count,
datetime(last_hit_date + strftime('%s', '2001-01-01 00:00:00'), 'unixepoch') as last_hit_date,
width, (bytesperrow / (bitsperpixel/bitspercomponent)) as computed_width, height,
bitmapdata_location, bitmapdata_length
FROM thumbnails LEFT JOIN basic_files
ON (basic_files.fileId | -9223372036854775808) == thumbnails.file_id
group by fileId
"""
c.execute(combined_query)
combined_files = c.fetchall()
# If the statement returned anything, lets parse it further
if combined_files:
# Export thumbnails.data via mac_info
if type(export) is not str:
thumbfile = openDeadbox(path_to_thumbnails, export)
# Export thumbnails.data via single plugin
else:
thumbfile = openSingle(path_to_thumbnails)
unknown_count = 0
for entries in combined_files:
# Carve out thumbnails with no iNode
bitmapdata_location = entries[8]
bitmapdata_length = entries[9]
computed_width = entries[6]
height = entries[7]
name = "Unknown" + str(unknown_count)
hit_count = entries[3]
last_hit_date = entries[4]
version = b"" # Not writing this out
fs_id = "N/A"
inode = entries[0]
row_id = "N/A"
carveThumb(bitmapdata_location, bitmapdata_length, thumbfile, name, computed_width, height, export, '', True)
unknown_count += 1
ql = QuickLook("UNKNOWN", "UNKNOWN", hit_count, last_hit_date, version,
bitmapdata_location, bitmapdata_length, computed_width, height, fs_id, inode, row_id, source)
quicklook_array.append(ql)
if thumbfile:
thumbfile.close()
def convertBGRA_to_RGBA(data):
if len(data)%4 != 0:
print("Problem, got a remainder, trying to pad..!")
data += b'\0' * (4 - len(data)%4)
ret = tuple(chain(*((R,G,B,A) for B,G,R,A in zip_longest(*[iter(data)]*4))))
return bytes(ret)
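# Quick sanity check of the channel swap (a sketch):
#
#   >>> convertBGRA_to_RGBA(bytes([1, 2, 3, 4]))   # one pixel: B=1, G=2, R=3, A=4
#   b'\x03\x02\x01\x04'                            # reordered to R, G, B, A
#
# The 10.15+ thumbnail cache stores raw pixels as BGRA, so parseDbNew() and
# parseDbNewSinglePlug() call carveThumb() with is_BGRA=True, which applies this
# conversion before handing the buffer to Pillow's raw RGBA decoder.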
def parseDbNew(c, quicklook_array, source, path_to_thumbnails, export, user_name):
"""
:param c: Connection to index.sqlite
:param quicklook_array: Empty quicklook array to store QuickLook objects
:param source: The source file being used, the full path to the index.sqlite
:return: Nothing, fills the quicklook array
"""
inode_query = """
SELECT Name from Combined_Inodes where Combined_Inodes.CNID == {}
"""
combined_query = """
SELECT fileId, version, MAX(size), hit_count,
datetime(last_hit_date + strftime('%s', '2001-01-01 00:00:00'), 'unixepoch') as last_hit_date,
width, (bytesperrow / (bitsperpixel/bitspercomponent)) as computed_width, height,
bitmapdata_location, bitmapdata_length
FROM thumbnails LEFT JOIN basic_files
ON (basic_files.fileId | -9223372036854775808) == thumbnails.file_id
group by fileId
"""
c.execute(combined_query)
combined_files = c.fetchall()
# If the statement returned anything, lets parse it further
if combined_files:
# Export the thumbnails.data file via mac_info
thumbfile = openDeadbox(path_to_thumbnails, export)
unknown_count = 0
for entries in combined_files:
bitmapdata_location = entries[8]
bitmapdata_length = entries[9]
width = entries[6]
height = entries[7]
hit_count = entries[3]
last_hit_date = entries[4]
version = b""
fs_id = "N/A"
inode = entries[0]
row_id = "N/A"
# Format the inode_query for our specific iNode number so we can find the filename
apfs_query = inode_query.format(inode)
# Create cursor to the APFS db created by mac_apt
if hasattr(export, 'apfs_db'): # Check if this is from mounted disk
apfs_c = export.apfs_db.conn.cursor()
apfs_c.row_factory = sqlite3.Row
cursor = apfs_c.execute(apfs_query)
test_row = cursor.fetchone()
has_db = True
else:
has_db = False
test_row = None
if test_row is None:
if has_db:
log.warning("No file matches iNode: " + str(inode) + "!!")
log.warning("This file will be outputted as Unknown" + str(unknown_count))
# Carve out thumbnails with no iNode
name = f"Unknown-{unknown_count}-{inode}"
log.debug("Carving an unknown thumbnail, this is unknown number: " + str(unknown_count))
carveThumb(bitmapdata_location, bitmapdata_length, thumbfile, name, width, height, export, user_name, True)
unknown_count += 1
ql = QuickLook("UNKNOWN", "UNKNOWN", hit_count, last_hit_date, version, bitmapdata_location,
bitmapdata_length, width, height, fs_id, inode, row_id, source)
quicklook_array.append(ql)
else:
for row in test_row:
log.debug("File matching iNode: " + str(inode) + " is: " + row)
full_path = [""]
findParents(apfs_c, inode, full_path)
ql = QuickLook(full_path[0], row, hit_count, last_hit_date, version, bitmapdata_location,
bitmapdata_length, width, height, fs_id, inode, row_id, source)
quicklook_array.append(ql)
# Carve out thumbnails
log.debug("Carving thumbnail: " + str(full_path[0]) + row + " from thumbnails.data file")
carveThumb(bitmapdata_location, bitmapdata_length, thumbfile, row, width, height, export, user_name, True)
if thumbfile:
thumbfile.close()
def findDb(mac_info):
log.debug("Finding QuickLook databases and caches now in user cache dirs")
db_path_arr = []
thumbnail_path_array = []
users = []
is_big_sur_or_higher = mac_info.GetVersionDictionary()['major'] >= 11
for user in mac_info.users:
if not user.DARWIN_USER_CACHE_DIR or not user.user_name:
continue # TODO: revisit this later!
else:
darwin_user_folders = user.DARWIN_USER_CACHE_DIR.split(',')
for darwin_user_cache_dir in darwin_user_folders:
if is_big_sur_or_higher:
db_path = darwin_user_cache_dir + '/com.apple.quicklook.ThumbnailsAgent/com.apple.QuickLook.thumbnailcache/index.sqlite'
thumbnail_path = darwin_user_cache_dir + '/com.apple.quicklook.ThumbnailsAgent/com.apple.QuickLook.thumbnailcache/thumbnails.data'
else:
db_path = darwin_user_cache_dir + '/com.apple.QuickLook.thumbnailcache/index.sqlite'
thumbnail_path = darwin_user_cache_dir + '/com.apple.QuickLook.thumbnailcache/thumbnails.data'
if not mac_info.IsValidFilePath(db_path) or not mac_info.IsValidFilePath(thumbnail_path):
continue
log.debug(f"Found valid thumbnail database for user '{user.user_name}' at {db_path}")
log.debug(f"Found valid thumbnail data for user '{user.user_name}' at {thumbnail_path}")
db_path_arr.append(db_path)
thumbnail_path_array.append(thumbnail_path)
users.append(user.user_name)
return db_path_arr, thumbnail_path_array, users
def Plugin_Start(mac_info):
'''Main Entry point function for plugin'''
# Array to store QuickLook objects
quicklook_array = []
# Finds QuickLook index.sqlite and the thumbnails.data
paths_to_quicklook_db, paths_to_thumbnails, users = findDb(mac_info)
# Iterate through returned array of paths and pair each index.sqlite with their thumbnails.data
for quicklook_db_path, thumbnail_file, user in zip(paths_to_quicklook_db, paths_to_thumbnails, users):
log.info("QuickLook Cache data found!")
# Export index.sqlite and thumbnails.data file
mac_info.ExportFile(quicklook_db_path, __Plugin_Name, user + "_")
mac_info.ExportFile(thumbnail_file, __Plugin_Name, user + "_")
# Opens index.sqlite
quicklook_db, quicklook_wrapper = OpenDbFromImage(mac_info, quicklook_db_path)
if quicklook_db == None:
continue
c = quicklook_db.cursor()
query = "PRAGMA table_info('files');"
c.execute(query)
row = c.fetchone()
if row is not None:
log.debug("QuickLook data from Mac OS below 10.15 found... Processing")
parseDb(c, quicklook_array, quicklook_db_path, thumbnail_file, mac_info, user)
else:
log.debug("QuickLook data from Mac OS 10.15+ found... Processing")
if not hasattr(mac_info, 'apfs_db') or isinstance(mac_info, ZipMacInfo): # Check if this is from mounted disk or zip file
log.warning("Since the APFS database is not available (MOUNTED/ZIP mode?), file inodes won't be resolved to paths.")
parseDbNew(c, quicklook_array, quicklook_db_path, thumbnail_file, mac_info, user)
# Close the index.sqlite
quicklook_db.close()
# If the QuickLook array is not empty, we print the information out
if quicklook_array:
PrintAll(quicklook_array, mac_info.output_params, '')
else:
log.info("No QuickLook artifacts found.")
def Plugin_Start_Standalone(input_files_list, output_params):
query = "PRAGMA table_info('files');"
log.info("Module Started as standalone")
quicklook_db = os.path.join(input_files_list[0], "index.sqlite")
thumbnails = os.path.join(input_files_list[0], "thumbnails.data")
quicklook_array = []
if os.path.isfile(quicklook_db) and os.path.isfile(thumbnails):
log.info("index.sqlite and thumbnails.data files found!")
db = OpenDb(quicklook_db)
c = db.cursor()
c.execute(query)
row = c.fetchone()
if row is not None:
log.debug("QuickLook data from Mac OS below 10.15 found... Processing")
parseDb(c, quicklook_array, quicklook_db, thumbnails, output_params.output_path, '')
else:
log.debug("QuickLook data from Mac OS 10.15+ found... Processing")
parseDbNewSinglePlug(c, quicklook_array, quicklook_db, thumbnails, output_params.output_path)
db.close()
else:
log.error("index.sqlite or thumbnails.data not found in input directory.\n"
"Remember to use a folder containing the index.sqlite AND the thumbnails.data as your input!")
if quicklook_array:
log.info("QuickLook data processed. Printing out now")
PrintAll(quicklook_array, output_params, '')
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | -3,242,953,654,342,168,600 | 40.565714 | 150 | 0.602059 | false |
glexey/vim_html_paste | clip2html.py | 1 | 4509 | # This script marks the existing text content of a Windows
# clipboard as HTML. This is useful to automate copy/paste of
# the formatted code from gVIM to Outlook, blog post, etc.
# Clipboard stuff from http://code.activestate.com/recipes/474121/
import re
import win32clipboard
class HtmlClipboard:
CF_HTML = None
MARKER_BLOCK_OUTPUT = \
"Version:1.0\r\n" \
"StartHTML:%09d\r\n" \
"EndHTML:%09d\r\n" \
"StartFragment:%09d\r\n" \
"EndFragment:%09d\r\n" \
"StartSelection:%09d\r\n" \
"EndSelection:%09d\r\n" \
"SourceURL:%s\r\n"
MARKER_BLOCK_EX = \
"Version:(\S+)\s+" \
"StartHTML:(\d+)\s+" \
"EndHTML:(\d+)\s+" \
"StartFragment:(\d+)\s+" \
"EndFragment:(\d+)\s+" \
"StartSelection:(\d+)\s+" \
"EndSelection:(\d+)\s+" \
"SourceURL:(\S+)"
MARKER_BLOCK_EX_RE = re.compile(MARKER_BLOCK_EX)
MARKER_BLOCK = \
"Version:(\S+)\s+" \
"StartHTML:(\d+)\s+" \
"EndHTML:(\d+)\s+" \
"StartFragment:(\d+)\s+" \
"EndFragment:(\d+)\s+" \
"SourceURL:(\S+)"
MARKER_BLOCK_RE = re.compile(MARKER_BLOCK)
DEFAULT_HTML_BODY = \
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">" \
"<HTML><HEAD></HEAD><BODY><!--StartFragment-->%s<!--EndFragment--></BODY></HTML>"
def __init__(self):
self.html = None
self.fragment = None
self.selection = None
self.source = None
self.htmlClipboardVersion = None
def GetCfHtml(self):
"""
Return the FORMATID of the HTML format
"""
if self.CF_HTML is None:
self.CF_HTML = win32clipboard.RegisterClipboardFormat("HTML Format")
return self.CF_HTML
def GetFromClipboard(self):
"""
Read and decode the HTML from the clipboard
"""
try:
win32clipboard.OpenClipboard(0)
src = win32clipboard.GetClipboardData(self.GetCfHtml())
self.DecodeClipboardSource(src)
finally:
win32clipboard.CloseClipboard()
def PutFragment(self, fragment, selection=None, html=None, source=None):
"""
Put the given well-formed fragment of Html into the clipboard.
selection, if given, must be a literal string within fragment.
html, if given, must be a well-formed Html document that textually
contains fragment and its required markers.
"""
if selection is None:
selection = fragment
if html is None:
html = self.DEFAULT_HTML_BODY % fragment
if source is None:
source = "file://cliphtml.vim"
fragmentStart = html.index(fragment)
fragmentEnd = fragmentStart + len(fragment)
selectionStart = html.index(selection)
selectionEnd = selectionStart + len(selection)
self.PutToClipboard(html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source)
def PutToClipboard(self, html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source="None"):
"""
Replace the Clipboard contents with the given html information.
"""
try:
win32clipboard.OpenClipboard(0)
win32clipboard.EmptyClipboard()
src = self.EncodeClipboardSource(html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source)
#print src
win32clipboard.SetClipboardData(self.GetCfHtml(), src)
finally:
win32clipboard.CloseClipboard()
def EncodeClipboardSource(self, html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source):
"""
Join all our bits of information into a string formatted as per the HTML format specs.
"""
# How long is the prefix going to be?
dummyPrefix = self.MARKER_BLOCK_OUTPUT % (0, 0, 0, 0, 0, 0, source)
lenPrefix = len(dummyPrefix)
prefix = self.MARKER_BLOCK_OUTPUT % (lenPrefix, len(html)+lenPrefix,
fragmentStart+lenPrefix, fragmentEnd+lenPrefix,
selectionStart+lenPrefix, selectionEnd+lenPrefix,
source)
return (prefix + html)
# Get the (assumedly) HTML code from the clipboard, as text
win32clipboard.OpenClipboard(0)
text = win32clipboard.GetClipboardData()
# Put it back on the clipboard, now marking as HTML
HtmlClipboard().PutFragment(text)
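# The resulting CF_HTML payload looks roughly like this (offsets below are
# illustrative, not computed):
#
#   Version:1.0
#   StartHTML:000000105
#   EndHTML:000000281
#   StartFragment:000000183
#   EndFragment:000000195
#   StartSelection:000000183
#   EndSelection:000000195
#   SourceURL:file://cliphtml.vim
#   <!DOCTYPE HTML ...><BODY><!--StartFragment-->...<!--EndFragment--></BODY></HTML>
#
# The zero-padded numbers are byte offsets into the whole payload (header
# included), which is why EncodeClipboardSource() first renders a dummy marker
# block just to measure the prefix length.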
| mit | -8,930,930,131,424,293,000 | 32.154412 | 116 | 0.604125 | false |