# ===========================================================================
# basilfx/BierApp-Server :: bierapp/core/decorators.py (license: gpl-3.0)
# ===========================================================================
from django.shortcuts import get_object_or_404
from django.http import Http404
from functools import wraps
from bierapp.accounts.models import User
from bierapp.core.models import Transaction, ProductGroup, Product, \
TransactionTemplate
from bierapp.utils.types import get_int
def resolve_user(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
try:
user = request.site.users.get(id=id)
except User.DoesNotExist:
raise Http404
return func(request, user, *args, **kwargs)
return _inner
def resolve_transaction(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
transaction = get_object_or_404(Transaction, pk=id, site=request.site)
return func(request, transaction, *args, **kwargs)
return _inner
def resolve_product_group(func):
@wraps(func)
def _inner(request, id, *args, **kwargs):
product_group = get_object_or_404(
ProductGroup, pk=id, site=request.site)
return func(request, product_group, *args, **kwargs)
return _inner
def resolve_product(func):
@wraps(func)
def _inner(request, group_id, id, *args, **kwargs):
product = get_object_or_404(Product, pk=id, product_group=group_id)
return func(request, product, *args, **kwargs)
return _inner
def resolve_template(func):
@wraps(func)
def _inner(request, *args, **kwargs):
template_id = get_int(request.GET, "template", default=False)
if template_id:
kwargs["template"] = get_object_or_404(
TransactionTemplate, pk=template_id,
category__site=request.site)
return func(request, *args, **kwargs)
return _inner
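# --- Editor's usage sketch (hypothetical; not part of the original file) ---
# A minimal example of how a Django view might apply one of the decorators
# above. The view name, template path and URL wiring are assumptions, as is
# the ``request.site`` middleware the module relies on; only ``resolve_user``
# itself comes from this module.
from django.shortcuts import render

@resolve_user
def user_profile(request, user):
    # ``user`` is the resolved account object, not the raw ``id`` captured
    # in urls.py; a missing account has already raised Http404 upstream.
    return render(request, "profile.html", {"profile_user": user})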
# ===========================================================================
# chippey/gaffer :: python/GafferImageUITest/ImageGadgetTest.py (license: bsd-3-clause)
# ===========================================================================
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferUITest
import GafferImage
import GafferImageUI
class ImageGadgetTest( GafferUITest.TestCase ) :
def testBound( self ) :
g = GafferImageUI.ImageGadget()
self.assertEqual( g.bound(), IECore.Box3f() )
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 200, 100 ) )
g.setImage( c["out"] )
self.assertEqual( g.bound(), IECore.Box3f( IECore.V3f( 0 ), IECore.V3f( 200, 100, 0) ) )
c["format"].setValue( GafferImage.Format( 200, 100, 2 ) )
self.assertEqual( g.bound(), IECore.Box3f( IECore.V3f( 0 ), IECore.V3f( 400, 100, 0) ) )
c2 = GafferImage.Constant()
g.setImage( c2["out"] )
f = GafferImage.FormatPlug.getDefaultFormat( g.getContext() ).getDisplayWindow()
self.assertEqual( g.bound(), IECore.Box3f( IECore.V3f( f.min.x, f.min.y, 0 ), IECore.V3f( f.max.x, f.max.y, 0 ) ) )
GafferImage.FormatPlug.setDefaultFormat( g.getContext(), GafferImage.Format( IECore.Box2i( IECore.V2i( 10, 20 ), IECore.V2i( 30, 40 ) ) ) )
self.assertEqual( g.bound(), IECore.Box3f( IECore.V3f( 10, 20, 0 ), IECore.V3f( 30, 40, 0 ) ) )
def testGetImage( self ) :
g = GafferImageUI.ImageGadget()
self.assertEqual( g.getImage(), None )
c = GafferImage.Constant()
g.setImage( c["out"] )
self.assertTrue( g.getImage().isSame( c["out"] ) )
if __name__ == "__main__":
unittest.main()
# ===========================================================================
# aringh/odl :: odl/util/vectorization.py (license: mpl-2.0)
# ===========================================================================
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Utilities for internal functionality connected to vectorization."""
from __future__ import print_function, division, absolute_import
from builtins import object
from functools import wraps
import numpy as np
__all__ = ('is_valid_input_array', 'is_valid_input_meshgrid',
'out_shape_from_meshgrid', 'out_shape_from_array',
'OptionalArgDecorator', 'vectorize')
def is_valid_input_array(x, ndim=None):
"""Test if ``x`` is a correctly shaped point array in R^d."""
x = np.asarray(x)
if ndim is None or ndim == 1:
return x.ndim == 1 and x.size > 1 or x.ndim == 2 and x.shape[0] == 1
else:
return x.ndim == 2 and x.shape[0] == ndim
def is_valid_input_meshgrid(x, ndim):
"""Test if ``x`` is a `meshgrid` sequence for points in R^d."""
# This case is triggered in FunctionSpaceElement.__call__ if the
# domain does not have an 'ndim' attribute. We return False and
# continue.
if ndim is None:
return False
if not isinstance(x, tuple):
return False
if ndim > 1:
try:
np.broadcast(*x)
except (ValueError, TypeError): # cannot be broadcast
return False
return (len(x) == ndim and
all(isinstance(xi, np.ndarray) for xi in x) and
all(xi.ndim == ndim for xi in x))
def out_shape_from_meshgrid(mesh):
"""Get the broadcast output shape from a `meshgrid`."""
if len(mesh) == 1:
return (len(mesh[0]),)
else:
return np.broadcast(*mesh).shape
def out_shape_from_array(arr):
"""Get the output shape from an array."""
arr = np.asarray(arr)
if arr.ndim == 1:
return arr.shape
else:
return (arr.shape[1],)
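# --- Editor's sketch (not part of the original module): a small self-check
# of the input-classification helpers above. The demo function name is the
# editor's; everything it calls is defined in this module or NumPy.
def _demo_vectorization_helpers():
    pts = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])  # 3 points in R^2
    assert is_valid_input_array(pts, ndim=2)
    # A sparse meshgrid is a tuple of broadcastable arrays, one per axis
    mesh = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 4),
                       indexing='ij', sparse=True)
    assert is_valid_input_meshgrid(tuple(mesh), ndim=2)
    assert out_shape_from_meshgrid(mesh) == (3, 4)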
class OptionalArgDecorator(object):
"""Abstract class to create decorators with optional arguments.
This class implements the functionality of a decorator that can
be used with and without arguments, i.e. the following patterns
both work::
@decorator
def myfunc(x, *args, **kwargs):
pass
@decorator(param, **dec_kwargs)
def myfunc(x, *args, **kwargs):
pass
The arguments to the decorator are passed on to the underlying
wrapper.
To use this class, subclass it and implement the static ``_wrapper``
method.
"""
def __new__(cls, *args, **kwargs):
"""Create a new decorator instance.
There are two cases to distinguish:
1. Without arguments::
@decorator
def myfunc(x):
pass
which is equivalent to ::
def myfunc(x):
pass
myfunc = decorator(myfunc)
Hence, in this case, the ``__new__`` method of the decorator
immediately returns the wrapped function.
2. With arguments::
@decorator(*dec_args, **dec_kwargs)
def myfunc(x):
pass
which is equivalent to ::
def myfunc(x):
pass
dec_instance = decorator(*dec_args, **dec_kwargs)
myfunc = dec_instance(myfunc)
Hence, in this case, the first call creates an actual class
instance of ``decorator``, and in the second statement, the
``dec_instance.__call__`` method returns the wrapper using
the stored ``dec_args`` and ``dec_kwargs``.
"""
# Decorating without arguments: return wrapper w/o args directly
instance = super(OptionalArgDecorator, cls).__new__(cls)
if (not kwargs and
len(args) == 1 and
callable(args[0])):
func = args[0]
return instance._wrapper(func)
# With arguments, return class instance
else:
instance.wrapper_args = args
instance.wrapper_kwargs = kwargs
return instance
def __call__(self, func):
"""Return ``self(func)``.
This method is invoked when the decorator was created with
arguments.
Parameters
----------
func : callable
Original function to be wrapped
Returns
-------
wrapped : callable
The wrapped function
"""
return self._wrapper(func, *self.wrapper_args, **self.wrapper_kwargs)
@staticmethod
def _wrapper(func, *wrapper_args, **wrapper_kwargs):
"""Make a wrapper for ``func`` and return it.
This is a default implementation that simply returns the wrapped
function, i.e., the resulting decorator is the identity.
"""
return func
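# --- Editor's sketch (hypothetical): the smallest useful subclass of
# ``OptionalArgDecorator``. The ``tag`` decorator is invented purely for
# illustration; it shows that implementing ``_wrapper`` is the whole
# contract, after which both ``@tag`` and ``@tag(label='fast')`` work.
class tag(OptionalArgDecorator):

    """Attach a ``label`` attribute to the decorated function."""

    @staticmethod
    def _wrapper(func, label='untagged'):
        # Store the label on the function object and return it unchanged
        func.label = label
        return func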
class vectorize(OptionalArgDecorator):
"""Decorator class for function vectorization.
This vectorizer expects a function with exactly one positional
argument (input) and optional keyword arguments. The decorated
function has an optional ``out`` parameter for in-place evaluation.
Examples
--------
    Use the decorator without arguments:
>>> @vectorize
... def f(x):
... return x[0] + x[1] if x[0] < x[1] else x[0] - x[1]
>>>
>>> f([0, 1]) # np.vectorize'd functions always return an array
array(1)
>>> f([[0, -2], [1, 4]]) # corresponds to points [0, 1], [-2, 4]
array([1, 2])
The function may have ``kwargs``:
>>> @vectorize
... def f(x, param=1.0):
... return x[0] + x[1] if x[0] < param else x[0] - x[1]
>>>
>>> f([[0, -2], [1, 4]])
array([1, 2])
>>> f([[0, -2], [1, 4]], param=-1.0)
array([-1, 2])
You can pass arguments to the vectorizer, too:
>>> @vectorize(otypes=['float32'])
... def f(x):
... return x[0] + x[1] if x[0] < x[1] else x[0] - x[1]
>>> f([[0, -2], [1, 4]])
array([ 1., 2.], dtype=float32)
"""
@staticmethod
def _wrapper(func, *vect_args, **vect_kwargs):
"""Return the vectorized wrapper function."""
if not hasattr(func, '__name__'):
            # Set name if not available. Happens if func is a callable
            # object rather than a plain function.
func.__name__ = '{}.__call__'.format(func.__class__.__name__)
return wraps(func)(_NumpyVectorizeWrapper(func, *vect_args,
**vect_kwargs))
class _NumpyVectorizeWrapper(object):
"""Class for vectorization wrapping using `numpy.vectorize`.
The purpose of this class is to store the vectorized version of
a function when it is called for the first time.
"""
def __init__(self, func, *vect_args, **vect_kwargs):
"""Initialize a new instance.
Parameters
----------
func : callable
Python function or method to be wrapped
vect_args :
positional arguments for `numpy.vectorize`
vect_kwargs :
keyword arguments for `numpy.vectorize`
"""
super(_NumpyVectorizeWrapper, self).__init__()
self.func = func
self.vfunc = None
self.vect_args = vect_args
self.vect_kwargs = vect_kwargs
def __call__(self, x, out=None, **kwargs):
"""Vectorized function call.
Parameters
----------
x : `array-like` or sequence of `array-like`'s
Input argument(s) to the wrapped function
out : `numpy.ndarray`, optional
Appropriately sized array to write to
Returns
-------
out : `numpy.ndarray`
Result of the vectorized function evaluation. If ``out``
was given, the returned object is a reference to it.
"""
if np.isscalar(x):
x = np.array([x])
elif isinstance(x, np.ndarray) and x.ndim == 1:
x = x[None, :]
if self.vfunc is None:
# Not yet vectorized
def _func(*x, **kw):
return self.func(np.array(x), **kw)
self.vfunc = np.vectorize(_func, *self.vect_args,
**self.vect_kwargs)
        if out is None:
            return self.vfunc(*x, **kwargs)
        else:
            out[:] = self.vfunc(*x, **kwargs)
            return out
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
# ===========================================================================
# euphorie/Euphorie :: src/euphorie/client/authentication.py (license: gpl-2.0)
# ===========================================================================
"""
Authentication
--------------
User account plugins and authentication.
"""
from ..content.api.authentication import authenticate_token as authenticate_cms_token
from . import model
from .interfaces import IClientSkinLayer
from AccessControl import ClassSecurityInfo
from Acquisition import aq_parent
from App.class_init import InitializeClass
from euphorie.content.api.interfaces import ICMSAPISkinLayer
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Products.PluggableAuthService.interfaces.plugins import IAuthenticationPlugin
from Products.PluggableAuthService.interfaces.plugins import IChallengePlugin
from Products.PluggableAuthService.interfaces.plugins import IExtractionPlugin
from Products.PluggableAuthService.interfaces.plugins import IUserEnumerationPlugin
from Products.PluggableAuthService.interfaces.plugins import IUserFactoryPlugin
from Products.PluggableAuthService.plugins.BasePlugin import BasePlugin
from Products.PluggableAuthService.utils import classImplements
from z3c.saconfig import Session
from zope.publisher.interfaces.browser import IBrowserView
import logging
import six
import sqlalchemy.exc
import traceback
import urllib
log = logging.getLogger(__name__)
class NotImplementedError(Exception):
def __init__(self, message):
self.message = message
def graceful_recovery(default=None, log_args=True):
"""Decorator to safely use SQLAlchemy in PAS plugins. This decorator
makes sure SQL exceptions are caught and logged.
Code from Malthe Borch's pas.plugins.sqlalchemy package.
"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
value = func(*args, **kwargs)
except sqlalchemy.exc.SQLAlchemyError as e:
if log_args is False:
args = ()
kwargs = {}
formatted_tb = traceback.format_exc()
try:
exc_str = str(e)
except Exception:
exc_str = "<%s at 0x%x>" % (e.__class__.__name__, id(e))
log.critical(
"caught SQL-exception: "
"%s (in method ``%s``; arguments were %s)\n\n%s"
% (
exc_str,
func.__name__,
", ".join(
[repr(arg) for arg in args]
+ [
"%s=%s" % (name, repr(value))
for (name, value) in kwargs.items()
]
),
formatted_tb,
)
)
return default
return value
return wrapper
return decorator
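# --- Editor's usage sketch (hypothetical; not part of the original file):
# ``graceful_recovery`` is meant to wrap code that touches SQL so that a
# database error degrades to the given default instead of propagating. The
# helper below is invented for illustration only.
@graceful_recovery(default=[], log_args=False)
def _demo_accounts_by_login(login):
    """Hypothetical helper: returns [] if the SQL layer raises."""
    return Session().query(model.Account).filter_by(loginname=login).all()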
manage_addEuphorieAccountPlugin = PageTemplateFile(
"templates/addPasPlugin", globals(), __name__="manage_addEuphorieAccountPlugin"
)
def addEuphorieAccountPlugin(self, id, title="", REQUEST=None):
"""Add an EuphorieAccountPlugin to a Pluggable Authentication Service."""
p = EuphorieAccountPlugin(id, title)
self._setObject(p.getId(), p)
if REQUEST is not None:
REQUEST["RESPONSE"].redirect(
"%s/manage_workspace"
"?manage_tabs_message=Euphorie+Account+Manager+plugin+added."
% self.absolute_url()
)
class EuphorieAccountPlugin(BasePlugin):
meta_type = "Euphorie account manager"
security = ClassSecurityInfo()
def __init__(self, id, title=None):
self._setId(id)
self.title = title
def extractCredentials(self, request):
"""IExtractionPlugin implementation"""
token = request.getHeader("X-Euphorie-Token")
if token:
return {"api-token": token}
else:
return {}
@security.private
def _authenticate_token(self, credentials):
"""IAuthenticationPlugin implementation"""
token = credentials.get("api-token")
if not token:
return None
account = authenticate_cms_token(self, token)
return account
@security.private
def _authenticate_login(self, credentials):
login = credentials.get("login")
password = credentials.get("password")
account = authenticate(login, password)
if account is not None:
return (str(account.id), account.loginname)
else:
return None
@security.private
def _get_survey_session(self):
for parent in self.REQUEST.other["PARENTS"]:
if isinstance(parent, model.SurveySession):
return parent
else:
return None
@security.private
@graceful_recovery(log_args=False)
def authenticateCredentials(self, credentials):
if not (
IClientSkinLayer.providedBy(self.REQUEST)
or ICMSAPISkinLayer.providedBy(self.REQUEST)
):
return None
uid_and_login = self._authenticate_login(credentials)
if uid_and_login is None:
uid_and_login = self._authenticate_token(credentials)
if uid_and_login is not None:
session = self._get_survey_session()
if session is not None:
# Verify if current session matches the user. This prevents
# a cookie hijack attack.
if str(session.account_id) != uid_and_login[0]:
return None
return uid_and_login
else:
return None
@graceful_recovery()
def createUser(self, user_id, name):
"""IUserFactoryPlugin implementation"""
try:
user_id = int(user_id)
except (TypeError, ValueError):
return None
return Session().query(model.Account).get(user_id)
@graceful_recovery()
def enumerateUsers(
self,
id=None,
login=None,
exact_match=False,
sort_by=None,
max_results=None,
**kw
):
"""IUserEnumerationPlugin implementation"""
if not exact_match:
return []
if not IClientSkinLayer.providedBy(self.REQUEST):
return []
query = Session().query(model.Account)
if id is not None:
try:
query = query.filter(model.Account.id == int(id))
except ValueError:
return []
if login:
query = query.filter(model.Account.loginname == login)
account = query.first()
if account is not None:
return [{"id": str(account.id), "login": account.loginname}]
return []
def updateUser(self, user_id, login_name):
"""Changes the user's username. New method available since Plone 4.3.
Euphorie doesn't support this.
:returns: False
"""
return False
def updateEveryLoginName(self, quit_on_first_error=True):
"""Update login names of all users to their canonical value.
This should be done after changing the login_transform
property of PAS.
You can set quit_on_first_error to False to report all errors
before quitting with an error. This can be useful if you want
to know how many problems there are, if any.
:raises: NotImplementedError
"""
raise NotImplementedError(
"updateEveryLoginName method is not implemented by Euphorie"
)
def challenge(self, request, response):
"""IChallengePlugin implementation"""
if not IClientSkinLayer.providedBy(request):
return False
current_url = request.get("ACTUAL_URL", "")
query = request.get("QUERY_STRING")
if query:
if not query.startswith("?"):
query = "?" + query
current_url += query
context = request.get("PUBLISHED")
if not context:
log.error(
"Refusing to authenticate because no context has been found in %r", # noqa: E501
request,
)
return False
if IBrowserView.providedBy(context):
context = aq_parent(context)
login_url = "%s/@@login?%s" % (
context.absolute_url(),
urllib.urlencode(dict(came_from=current_url)),
)
response.redirect(login_url, lock=True)
return True
def authenticate(login, password):
"""Try to authenticate a user using the given login and password.
:param unicode login: login name
:param unicode password: users password
:return: :py:class:`Account <euphorie.client.model.Account>` instance
If the credentials are valid the matching account is returned. For invalid
credentials None is returned instead.
"""
if not login or not password:
return None
if isinstance(password, six.text_type):
password = password.encode("utf8")
login = login.lower()
accounts = Session().query(model.Account).filter(model.Account.loginname == login)
for account in accounts:
if account.verify_password(password):
return account
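# --- Editor's note (sketch, not part of the original file): ``authenticate``
# assumes ``pwd_context`` has been loaded with at least one hashing scheme
# first; the scheme choice below is an assumption (the tests use des_crypt):
#
#   pwd_context.load({'schemes': ['bcrypt']})
#   account = authenticate(u'user@example.com', u'secret')
#   if account is None:
#       ...  # unknown login or wrong password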
classImplements(
EuphorieAccountPlugin,
IAuthenticationPlugin,
IChallengePlugin,
IExtractionPlugin,
IUserEnumerationPlugin,
IUserFactoryPlugin,
)
InitializeClass(EuphorieAccountPlugin)
# ===========================================================================
# tensorflow/models :: official/vision/beta/modeling/backbones/resnet_3d_test.py (license: apache-2.0)
# ===========================================================================
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for resnet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.modeling.backbones import resnet_3d
class ResNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 50, 4, 'v0', False, 0.0),
(128, 50, 4, 'v1', False, 0.2),
(256, 50, 4, 'v1', True, 0.2),
)
def test_network_creation(self, input_size, model_id, endpoint_filter_scale,
stem_type, se_ratio, init_stochastic_depth_rate):
"""Test creation of ResNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
temporal_strides = [1, 1, 1, 1]
temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)]
use_self_gating = [True, False, True, False]
network = resnet_3d.ResNet3D(
model_id=model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
stem_type=stem_type,
se_ratio=se_ratio,
init_stochastic_depth_rate=init_stochastic_depth_rate)
inputs = tf.keras.Input(shape=(8, input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
self.assertAllEqual([
1, 2, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale
], endpoints['2'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale
], endpoints['5'].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=50,
temporal_strides=[1, 1, 1, 1],
temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)],
stem_type='v0',
stem_conv_temporal_kernel_size=5,
stem_conv_temporal_stride=2,
stem_pool_temporal_stride=2,
se_ratio=0.0,
use_self_gating=None,
init_stochastic_depth_rate=0.0,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = resnet_3d.ResNet3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = resnet_3d.ResNet3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
# ===========================================================================
# DFO-Ocean-Navigator/Ocean-Data-Map-Project :: tests/test_scriptGen.py (license: gpl-3.0)
# ===========================================================================
import ast
from plotting.scriptGenerator import generatePython, generateR
from oceannavigator import create_app
import hashlib
import json
import unittest
from io import BytesIO
class TestScriptGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.app = create_app()
def test_generatePython_plot(self):
with self.app.app_context():
plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[50.32977916630952,-54.02923583984376],[49.99194654491231,-41.90032958984374],[43.11512912870705,-41.90032958984374],[43.8801861709303,-54.20501708984374],[50.32977916630952,-54.02923583984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":860,"type":"map","variable":"votemper"}'
data = generatePython(plotQuery, "PLOT").read()
ast.parse(data)
def test_generatePython_csv(self):
with self.app.app_context():
plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[47.59676544537632,-63.322752995466445],[47.48923059927762,-62.7459688212614],[46.71147616396766,-62.92175066482866],[47.07117494555064,-63.848111528746855],[47.59676544537632,-63.322752995466445]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"10.672692871093773,21.980279541015648,auto","showarea":true,"time":712,"type":"map","variable":"votemper"}'
data = generatePython(plotQuery, "CSV").read()
ast.parse(data)
def test_generatePython_netcdf(self):
with self.app.app_context():
plotQuery = '{"dataset_name":"giops_day","max_range":"47.59676544537632,-62.7459688212614","min_range":"46.71147616396766,-63.848111528746855","output_format":"NETCDF4","should_zip":0,"time":"712,716","user_grid":0,"variables":"vice,votemper,vozocrtx,vomecrty"}'
data = generatePython(plotQuery, "SUBSET").read()
ast.parse(data)
@unittest.skip("Test is broken: these should not have been comparing hashes, but the entire output.")
def test_generateR_plot(self):
with self.app.app_context():
plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[57.45537472457255,-53.32611083984376],[54.96545403664038,-35.91699909988563],[37.492919230762624,-40.57520222488561],[39.21584183791197,-60.08692097488562],[57.45537472457255,-53.32611083984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":862,"type":"map","variable":"votemper"}'
data = generateR(plotQuery)
newData = data.read()
m = hashlib.md5()
m.update(newData)
expectedHash = '7442e1b8ac4b92d9a8aafa7edf6a8400'
self.assertEqual(m.hexdigest(), expectedHash)
@unittest.skip("Test is broken: these should not have been comparing hashes, but the entire output.")
def test_generateR_csv(self):
with self.app.app_context():
plotQuery = '{"area":[{"innerrings":[],"name":"","polygons":[[[57.45537472457255,-53.32611083984376],[54.96545403664038,-35.91699909988563],[37.492919230762624,-40.57520222488561],[39.21584183791197,-60.08692097488562],[57.45537472457255,-53.32611083984376]]]}],"bathymetry":true,"colormap":"default","contour":{"colormap":"default","hatch":false,"legend":true,"levels":"auto","variable":"none"},"dataset":"giops_day","depth":0,"interp":"gaussian","neighbours":10,"projection":"EPSG:3857","quiver":{"colormap":"default","magnitude":"length","variable":"none"},"radius":25,"scale":"-5,30,auto","showarea":true,"time":862,"type":"map","variable":"votemper"}&save&format=csv&size=10x7&dpi=144'
data = generateR(plotQuery)
newData = data.read()
m = hashlib.md5()
m.update(newData)
expectedHash = '4afa74cd7db4226c78fb7f5e2ae0a22f'
self.assertEqual(m.hexdigest(), expectedHash)
@unittest.skip("Test is broken: these should not have been comparing hashes, but the entire output.")
def test_generateR_netcdf(self):
with self.app.app_context():
plotQuery = '{"dataset_name":"giops_day","max_range":"57.45537472457255,-35.91699909988563","min_range":"37.492919230762624,-60.08692097488562","output_format":"NETCDF4","should_zip":0,"time":"857,862","user_grid":0,"variables":"vice,votemper,vozocrtx,vomecrty"}'
data = generateR(plotQuery)
newData = data.read()
m = hashlib.md5()
m.update(newData)
expectedHash = '9c4552b8e34e8856bd8bde64125e7f2d'
self.assertEqual(m.hexdigest(), expectedHash) | gpl-3.0 | 6,761,764,156,442,774,000 | 60.977528 | 702 | 0.658386 | false |
# ===========================================================================
# larryweya/dry-pyramid :: drypyramid/tests.py (license: mit)
# ===========================================================================
import unittest
import colander
from webob.multidict import MultiDict
from webtest import TestApp
from pyramid import testing
from pyramid.httpexceptions import (
HTTPNotFound,
HTTPFound
)
from sqlalchemy import (
create_engine,
Column,
Integer,
String,
Table,
ForeignKey,
)
from sqlalchemy.orm import (
relationship,
)
from .models import (
SASession,
Base,
ModelFactory,
BaseRootFactory,
BaseUser,
)
from .auth import pwd_context
from .views import (
model_list,
model_create,
model_show,
model_update,
model_delete,
ModelView,
)
person_hobby = Table(
'person_hobby', Base.metadata,
Column('person_id', Integer, ForeignKey('person.id')),
Column('hobby_id', Integer, ForeignKey('hobby.id')),
)
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
age = Column(Integer, nullable=False)
hobbies = relationship('Hobby', secondary=person_hobby, backref='people')
class Hobby(Base):
__tablename__ = 'hobby'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
class PersonModelFactory(ModelFactory):
ModelClass = Person
def post_get_item(self, item):
self.post_get_item_called = True
class PersonForm(colander.MappingSchema):
name = colander.SchemaNode(colander.String(encoding='utf-8'))
age = colander.SchemaNode(colander.Integer())
class HobbiesSchema(colander.SequenceSchema):
name = colander.SchemaNode(
colander.String(encoding='utf-8'), title="Hobby")
class PersonUpdateForm(colander.MappingSchema):
hobbies = HobbiesSchema(values=[
('su', 'Superuser'),
('billing', 'Billing'),
('customer_care', 'Customer Care')
])
class TestBase(unittest.TestCase):
def _setup_db(self):
self.engine = create_engine('sqlite:///:memory:', echo=True)
SASession.configure(bind=self.engine)
Base.metadata.create_all(self.engine)
def setUp(self):
self.config = testing.setUp()
#self.config.add_route('login', '/login')
#self.config.add_route('root', '/*traverse')
self._setup_db()
def tearDown(self):
SASession.remove()
testing.tearDown()
class TestBaseModel(TestBase):
def test_create_from_dict(self):
data = {
'name': "Mr Smith",
'age': 23
}
model = Person.create_from_dict(data)
self.assertEqual(model.name, data['name'])
self.assertEqual(model.age, data['age'])
def test_to_dict(self):
model = Person(name="Mr Smith", age=23)
data = model.to_dict()
expected_data = {
'id': None,
'name': "Mr Smith",
'age': 23
}
self.assertEqual(data, expected_data)
def test_update_from_dict(self):
model = Person(name="Mr Smith", age=23)
update_data = {
'name': "Mrs Smith",
'age': 35
}
model.update_from_dict(update_data)
self.assertEqual(model.name, update_data['name'])
self.assertEqual(model.age, update_data['age'])
def test_to_dict_handles_relationships(self):
pass
class TestModelFactory(TestBase):
def setUp(self):
super(TestModelFactory, self).setUp()
self.request = testing.DummyRequest()
# this is done by ModelView on include
route_name = 'persons'
base_url = 'people'
PersonModelFactory.__route_name__ = route_name
self.config.add_route(route_name, '/{0}/*traverse'.format(base_url),
factory=PersonModelFactory)
self.factory = PersonModelFactory(self.request)
def test_list_url(self):
url = self.factory.list_url(self.request)
expected_url = "%s/people/" % self.request.application_url
self.assertEqual(url, expected_url)
def test_create_url(self):
self.factory = PersonModelFactory(self.request)
url = self.factory.create_url(self.request)
expected_url = "%s/people/add" % self.request.application_url
self.assertEqual(url, expected_url)
def test_show_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.show_url(self.request, person)
expected_url = "{0}/people/{1}".format(self.request.application_url,
person.id)
self.assertEqual(url, expected_url)
def test_update_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.update_url(self.request, person)
expected_url = "{0}/people/{1}/edit".format(
self.request.application_url, person.id)
self.assertEqual(url, expected_url)
def test_delete_url(self):
person = Person(id=1, name="Mr Smith", age=23)
url = self.factory.delete_url(self.request, person)
expected_url = "{0}/people/{1}/delete".format(
self.request.application_url, person.id)
self.assertEqual(url, expected_url)
def test_get_item_calls_post_get_item(self):
self.factory = PersonModelFactory(self.request)
# create a Person
person = Person(name="Mr Smith", age=23)
person.save()
self.factory.__getitem__('1')
self.assertTrue(self.factory.post_get_item_called)
class TestViewHelpers(TestBase):
def setUp(self):
super(TestViewHelpers, self).setUp()
self.config.add_route('persons', '/persons/*traverse',
factory=PersonModelFactory)
def test_model_list(self):
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_list(Person)
request = testing.DummyRequest()
response = view(request)
self.assertIn('records', response)
self.assertIsInstance(response['records'][0], Person)
def test_model_create(self):
def _post_create_response_callback(request, record):
return HTTPFound(request.route_url('persons',
traverse=(record.id,)))
def _pre_create_callback(request, record, values):
record.age = 25
view = model_create(Person, PersonForm, _post_create_response_callback,
_pre_create_callback)
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('name', 'Mr Smith'),
('age', '22'),
]
request.POST = MultiDict(values)
context = PersonModelFactory(request)
response = view(context, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/1'.format(request.application_url))
person = Person.query().filter_by(name='Mr Smith').one()
self.assertEqual(person.age, 25)
def test_model_show(self):
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_show(Person)
request = testing.DummyRequest()
response = view(person, request)
self.assertIn('record', response)
self.assertIsInstance(response['record'], Person)
def test_model_update(self):
def _post_update_response_callback(request, record):
return HTTPFound(request.route_url('persons',
traverse=(record.id,)))
person = Person(name='Not Mr Smith', age=23)
person.save()
SASession.flush()
def _pre_update_callback(request, record, values):
record.age = 28
view = model_update(Person, PersonForm, _post_update_response_callback,
_pre_update_callback)
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('name', 'Mr Smith'),
('age', '22'),
]
request.POST = MultiDict(values)
response = view(person, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/1'.format(request.application_url))
person = Person.query().filter_by(name='Mr Smith').one()
self.assertEqual(person.age, 28)
def test_model_delete(self):
def _post_del_response_callback(request, record):
return HTTPFound(request.route_url('persons', traverse=()))
person = Person(name='Mr Smith', age=23)
person.save()
SASession.flush()
view = model_delete(_post_del_response_callback)
self.config.add_view(view,
context=PersonModelFactory,
route_name='persons',
name='delete',
permission='delete',
check_csrf=True)
request = testing.DummyRequest()
request.method = 'POST'
response = view(person, request)
self.assertIsInstance(response, HTTPFound)
self.assertEqual(response.location,
'{0}/persons/'.format(request.application_url))
class TestRootFactory(BaseRootFactory):
pass
class FunctionalTestBase(TestBase):
application_url = 'http://localhost'
def setUp(self):
super(FunctionalTestBase, self).setUp()
self.config.set_root_factory(TestRootFactory)
session_factory = testing.DummySession
self.config.set_session_factory(session_factory)
class TestModelView(FunctionalTestBase):
class TestRenderer(object):
responses = {
'templates/person_list.pt': '{{"title": "People List"}}',
'templates/person_create.pt': '{{"title": "People Create"}}',
'templates/person_show.pt': '{{"title": "Person Show"}}',
'templates/person_update.pt': '{{"title": "Person Update",'
'"form_class": "{form_class}"}}',
# custom templates
'templates/person_custom_list.pt': '{{"title": "People Custom List"}}',
            'templates/person_custom_create.pt': '{{"title": "People Custom Create",'
                '"form_class": "{form_class}"}}',
'templates/person_custom_show.pt': '{{"title": "Person Custom Show"}}',
'templates/person_custom_update.pt': '{{"title": "Person Custom Update", '
'"form_class": "{form_class}"}}'
}
def __init__(self, info):
pass
def __call__(self, value, system):
renderer = system['renderer_name']
response = self.responses[renderer]
if 'form' in value:
response = response.format(
form_class=value['form'].schema.__class__.__name__)
return response
def setUp(self):
super(TestModelView, self).setUp()
self.config.add_renderer('.pt', self.TestRenderer)
person = Person(name="Mr Smith", age=23)
person.save()
SASession.flush()
def test_view_registration(self):
"""
Check that all views (list, create, show, update, delete) are
registered by default
"""
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Create')
# show
response = testapp.get('/people/1')
response.mustcontain('Person Show')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Update')
# delete
request = testing.DummyRequest()
csrf_token = request.session.get_csrf_token()
response = testapp.post('/people/1/delete', {'csrf_token': csrf_token})
self.assertEqual(response.status_code, 302)
def test_only_requested_views_are_registered(self):
"""
Test that only views within the enabled_views list are created and
exposed
"""
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
enabled_views = (ModelView.LIST, ModelView.CREATE, ModelView.UPDATE)
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Create')
# show
self.assertRaises(HTTPNotFound, testapp.get, '/people/1')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Update')
# delete
request = testing.DummyRequest()
csrf_token = request.session.get_csrf_token()
self.assertRaises(HTTPNotFound, testapp.post, '/people/1/delete',
{'csrf_token': csrf_token})
def test_update_view_uses_update_form_override_if_specified(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
ModelUpdateFormClass = PersonUpdateForm
base_url_override = 'people'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# update
response = testapp.get('/people/1/edit')
response.mustcontain('PersonUpdateForm')
def test_renderer_overrides_work_on_all_views(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
list_view_renderer = 'templates/person_custom_list.pt'
create_view_renderer = 'templates/person_custom_create.pt'
show_view_renderer = 'templates/person_custom_show.pt'
update_view_renderer = 'templates/person_custom_update.pt'
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
# list
response = testapp.get('/people/')
response.mustcontain('People Custom List')
# create
response = testapp.get('/people/add')
response.mustcontain('People Custom Create')
# show
response = testapp.get('/people/1')
response.mustcontain('Person Custom Show')
# update
response = testapp.get('/people/1/edit')
response.mustcontain('Person Custom Update')
class TestModelViewResponseCallbacks(FunctionalTestBase):
def test_create_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=(record.id,)))
            # NOTE: just overriding the function doesn't work
post_create_response_callback = post_save_response
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {
'name': 'Mr Smith',
'age': '22',
'csrf_token': request.session.get_csrf_token()}
response = testapp.post('/people/add', params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/1'.format(self.application_url))
def test_update_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=(record.id,)))
            # NOTE: just overriding the function doesn't work
post_update_response_callback = post_save_response
person = Person(name='Mrs Smith', age=25)
SASession.add(person)
SASession.flush()
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {
'name': 'Mrs Jane Smith',
'age': '22',
'csrf_token': request.session.get_csrf_token()}
url = '/people/{0}/edit'.format(person.id)
response = testapp.post(url, params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/1'.format(self.application_url))
def test_delete_view_response_override_works(self):
class PersonViews(ModelView):
ModelFactoryClass = PersonModelFactory
ModelFormClass = PersonForm
base_url_override = 'people'
@classmethod
def post_save_response(cls, request, record):
return HTTPFound(request.route_url('person',
traverse=('2', 'edit')))
post_delete_response_callback = post_save_response
person = Person(name='Mr Smith', age=25)
SASession.add(person)
SASession.flush()
PersonViews.include(self.config)
testapp = TestApp(self.config.make_wsgi_app())
request = testing.DummyRequest()
params = {'csrf_token': request.session.get_csrf_token()}
url = '/people/{0}/delete'.format(person.id)
response = testapp.post(url, params)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location,
'{0}/people/2/edit'.format(self.application_url))
class TestLogin(TestBase):
def setUp(self):
super(TestLogin, self).setUp()
self.config.add_route('login', '/login')
pwd_context.load({'schemes': ['des_crypt']})
user = BaseUser(account_id='[email protected]', password='admin')
user.save()
SASession.flush()
def test_login_GET_request(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'GET'
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIn('csrf_token', response)
self.assertIn('form', response)
def test_login_returns_bad_request_if_no_csrf_token(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertEqual(response.status_code, 400)
def test_login_POST_with_valid_credentials(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('account_id', '[email protected]'),
('password', 'admin'),
]
request.POST = MultiDict(values)
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIsInstance(response, HTTPFound)
def test_login_POST_with_invalid_credentials(self):
from views import user_login
request = testing.DummyRequest()
request.method = 'POST'
values = [
('csrf_token', request.session.get_csrf_token()),
('account_id', '[email protected]'),
('password', 'wrong'),
]
request.POST = MultiDict(values)
context = BaseRootFactory(request)
response = user_login(context, request)
self.assertIn('csrf_token', response)
self.assertIn('form', response)
# ===========================================================================
# 6aika/issue-reporting :: issues/models/applications.py (license: mit)
# ===========================================================================
from django.db import models
from django.utils.crypto import get_random_string
from issues.excs import InvalidAppError
DEFAULT_APP_DATA = { # Used by `.autodetermine()` and the migration
'identifier': 'default',
'name': 'Default',
'key': ''
}
def generate_api_key():
return get_random_string(30)
class Application(models.Model):
active = models.BooleanField(default=True, db_index=True)
identifier = models.CharField(
max_length=64,
db_index=True,
help_text='a machine-readable name for this app (a package identifier, for instance)',
)
name = models.CharField(
max_length=64,
help_text='a human-readable name for this app',
)
key = models.CharField(max_length=32, unique=True, default=generate_api_key, editable=False)
@staticmethod
def autodetermine():
app_count = Application.objects.count()
if app_count == 0:
return Application.objects.create(**DEFAULT_APP_DATA)
elif app_count == 1:
return Application.objects.filter(key='').first()
raise InvalidAppError('There are %d applications, so a valid API key must be passed in' % app_count)
def __str__(self):
return self.name
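# --- Editor's sketch (hypothetical; not part of the original file): a
# typical lookup of the calling application by API key, falling back to
# ``autodetermine`` when no key was passed. The function name and the
# key-mismatch handling are the editor's assumptions.
def get_application(api_key=None):
    if api_key:
        app = Application.objects.filter(active=True, key=api_key).first()
        if app is None:
            raise InvalidAppError('Invalid API key')
        return app
    return Application.autodetermine()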
# ===========================================================================
# dajobe/SoCo :: soco/ms_data_structures.py
# ===========================================================================
# -*- coding: utf-8 -*-
# pylint: disable = star-args, too-many-arguments, unsupported-membership-test
# pylint: disable = not-an-iterable
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance
"""This module contains all the data structures for music service plugins."""
# This needs to be integrated with Music Library data structures
from __future__ import unicode_literals
from .exceptions import DIDLMetadataError
from .utils import camel_to_underscore
from .xml import (
NAMESPACES, XML, ns_tag
)
def get_ms_item(xml, service, parent_id):
"""Return the music service item that corresponds to xml.
The class is identified by getting the type from the 'itemType' tag
"""
cls = MS_TYPE_TO_CLASS.get(xml.findtext(ns_tag('ms', 'itemType')))
out = cls.from_xml(xml, service, parent_id)
return out
def tags_with_text(xml, tags=None):
"""Return a list of tags that contain text retrieved recursively from an
XML tree."""
if tags is None:
tags = []
for element in xml:
if element.text is not None:
tags.append(element)
elif len(element) > 0: # pylint: disable=len-as-condition
tags_with_text(element, tags)
else:
message = 'Unknown XML structure: {}'.format(element)
raise ValueError(message)
return tags
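# --- Editor's sketch (not part of the original module): what
# ``tags_with_text`` collects from a small, invented tree. ``XML`` is the
# ElementTree module re-exported by ``soco.xml`` above.
def _demo_tags_with_text():
    elem = XML.fromstring('<root><a>one</a><b><c>two</c></b></root>')
    # Only leaf elements carrying text are collected, depth-first
    return [e.text for e in tags_with_text(elem)]  # -> ['one', 'two']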
class MusicServiceItem(object):
"""Class that represents a music service item."""
# These fields must be overwritten in the sub classes
item_class = None
valid_fields = None
required_fields = None
def __init__(self, **kwargs):
super(MusicServiceItem, self).__init__()
self.content = kwargs
@classmethod
def from_xml(cls, xml, service, parent_id):
"""Return a Music Service item generated from xml.
:param xml: Object XML. All items containing text are added to the
content of the item. The class variable ``valid_fields`` of each of
the classes list the valid fields (after translating the camel
case to underscore notation). Required fields are listed in the
class variable by that name (where 'id' has been renamed to
'item_id').
:type xml: :py:class:`xml.etree.ElementTree.Element`
:param service: The music service (plugin) instance that retrieved the
element. This service must contain ``id_to_extended_id`` and
``form_uri`` methods and ``description`` and ``service_id``
attributes.
:type service: Instance of sub-class of
:class:`soco.plugins.SoCoPlugin`
:param parent_id: The parent ID of the item, will either be the
extended ID of another MusicServiceItem or of a search
:type parent_id: str
For a track the XML can e.g. be on the following form:
.. code :: xml
<mediaMetadata xmlns="http://www.sonos.com/Services/1.1">
<id>trackid_141359</id>
<itemType>track</itemType>
<mimeType>audio/aac</mimeType>
<title>Teacher</title>
<trackMetadata>
<artistId>artistid_10597</artistId>
<artist>Jethro Tull</artist>
<composerId>artistid_10597</composerId>
<composer>Jethro Tull</composer>
<albumId>albumid_141358</albumId>
<album>MU - The Best Of Jethro Tull</album>
<albumArtistId>artistid_10597</albumArtistId>
<albumArtist>Jethro Tull</albumArtist>
<duration>229</duration>
<albumArtURI>http://varnish01.music.aspiro.com/sca/
imscale?h=90&w=90&img=/content/music10/prod/wmg/
1383757201/094639008452_20131105025504431/resources/094639008452.
jpg</albumArtURI>
<canPlay>true</canPlay>
<canSkip>true</canSkip>
<canAddToFavorites>true</canAddToFavorites>
</trackMetadata>
</mediaMetadata>
"""
# Add a few extra pieces of information
content = {'description': service.description,
'service_id': service.service_id,
'parent_id': parent_id}
# Extract values from the XML
all_text_elements = tags_with_text(xml)
for item in all_text_elements:
tag = item.tag[len(NAMESPACES['ms']) + 2:] # Strip namespace
tag = camel_to_underscore(tag) # Convert to nice names
if tag not in cls.valid_fields:
message = 'The info tag \'{}\' is not allowed for this item'.\
format(tag)
raise ValueError(message)
content[tag] = item.text
# Convert values for known types
for key, value in content.items():
if key == 'duration':
content[key] = int(value)
if key in ['can_play', 'can_skip', 'can_add_to_favorites',
'can_enumerate']:
content[key] = (value == 'true')
# Rename a single item
content['item_id'] = content.pop('id')
# And get the extended id
content['extended_id'] = service.id_to_extended_id(content['item_id'],
cls)
# Add URI if there is one for the relevant class
uri = service.form_uri(content, cls)
if uri:
content['uri'] = uri
# Check for all required values
for key in cls.required_fields:
if key not in content:
                message = 'An XML field that corresponds to the key \'{}\' '\
                    'is required. See the docstring for help.'.format(key)
                raise ValueError(message)
        return cls.from_dict(content)
@classmethod
def from_dict(cls, dict_in):
"""Initialize the class from a dict.
:param dict_in: The dictionary that contains the item content. Required
fields are listed class variable by that name
:type dict_in: dict
"""
kwargs = dict_in.copy()
args = [kwargs.pop(key) for key in cls.required_fields]
return cls(*args, **kwargs)
def __eq__(self, playable_item):
"""Return the equals comparison result to another ``playable_item``."""
if not isinstance(playable_item, MusicServiceItem):
return False
return self.content == playable_item.content
def __ne__(self, playable_item):
"""Return the not equals comparison result to another
``playable_item``"""
if not isinstance(playable_item, MusicServiceItem):
return True
return self.content != playable_item.content
def __repr__(self):
"""Return the repr value for the item.
The repr is on the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.content.get('title') is not None:
middle = self.content['title'].encode('ascii', 'replace')[0:40]
else:
middle = str(self.content).encode('ascii', 'replace')[0:40]
return '<{} \'{}\' at {}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
@property
def to_dict(self):
"""Return a copy of the content dict."""
return self.content.copy()
@property
def didl_metadata(self):
"""Return the DIDL metadata for a Music Service Track.
The metadata is on the form:
.. code :: xml
<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
<item id="...self.extended_id..."
parentID="...self.parent_id..."
restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
self.content['description']
</desc>
</item>
</DIDL-Lite>
"""
# Check if this item is meant to be played
if not self.can_play:
message = 'This item is not meant to be played and therefore '\
'also not to create its own didl_metadata'
raise DIDLMetadataError(message)
# Check if we have the attributes to create the didl metadata:
for key in ['extended_id', 'title', 'item_class']:
if not hasattr(self, key):
message = 'The property \'{}\' is not present on this item. '\
'This indicates that this item was not meant to create '\
'didl_metadata'.format(key)
raise DIDLMetadataError(message)
if 'description' not in self.content:
message = 'The item for \'description\' is not present in '\
'self.content. This indicates that this item was not meant '\
'to create didl_metadata'
raise DIDLMetadataError(message)
# Main element, ugly? yes! but I have given up on using namespaces
# with xml.etree.ElementTree
item_attrib = {
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:upnp': 'urn:schemas-upnp-org:metadata-1-0/upnp/',
'xmlns:r': 'urn:schemas-rinconnetworks-com:metadata-1-0/',
'xmlns': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
}
xml = XML.Element('DIDL-Lite', item_attrib)
# Item sub element
item_attrib = {
'parentID': '',
'restricted': 'true',
'id': self.extended_id
}
# Only add the parent_id if we have it
if self.parent_id:
item_attrib['parentID'] = self.parent_id
item = XML.SubElement(xml, 'item', item_attrib)
# Add title and class
XML.SubElement(item, 'dc:title').text = self.title
XML.SubElement(item, 'upnp:class').text = self.item_class
# Add the desc element
desc_attrib = {
'id': 'cdudn',
'nameSpace': 'urn:schemas-rinconnetworks-com:metadata-1-0/'
}
desc = XML.SubElement(item, 'desc', desc_attrib)
desc.text = self.content['description']
return xml
@property
def item_id(self):
"""Return the item id."""
return self.content['item_id']
@property
def extended_id(self):
"""Return the extended id."""
return self.content['extended_id']
@property
def title(self):
"""Return the title."""
return self.content['title']
@property
def service_id(self):
"""Return the service ID."""
return self.content['service_id']
@property
def can_play(self):
"""Return a boolean for whether the item can be played."""
return bool(self.content.get('can_play'))
@property
def parent_id(self):
"""Return the extended parent_id, if set, otherwise return None."""
return self.content.get('parent_id')
@property
def album_art_uri(self):
"""Return the album art URI if set, otherwise return None."""
return self.content.get('album_art_uri')
class MSTrack(MusicServiceItem):
"""Class that represents a music service track."""
item_class = 'object.item.audioItem.musicTrack'
valid_fields = [
'album', 'can_add_to_favorites', 'artist', 'album_artist_id', 'title',
'album_id', 'album_art_uri', 'album_artist', 'composer_id',
'item_type', 'composer', 'duration', 'can_skip', 'artist_id',
'can_play', 'id', 'mime_type', 'description'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
"""Initialize MSTrack item."""
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSTrack, self).__init__(**content)
@property
def album(self):
"""Return the album title if set, otherwise return None."""
return self.content.get('album')
@property
def artist(self):
"""Return the artist if set, otherwise return None."""
return self.content.get('artist')
@property
def duration(self):
"""Return the duration if set, otherwise return None."""
return self.content.get('duration')
@property
def uri(self):
"""Return the URI."""
# x-sonos-http:trackid_19356232.mp4?sid=20&flags=32
return self.content['uri']
class MSAlbum(MusicServiceItem):
"""Class that represents a Music Service Album."""
item_class = 'object.container.album.musicAlbum'
valid_fields = [
'username', 'can_add_to_favorites', 'artist', 'title', 'album_art_uri',
'can_play', 'item_type', 'service_id', 'id', 'description',
'can_cache', 'artist_id', 'can_skip'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSAlbum, self).__init__(**content)
@property
def artist(self):
"""Return the artist if set, otherwise return None."""
return self.content.get('artist')
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:0004002calbumid_22757081
return self.content['uri']
class MSAlbumList(MusicServiceItem):
"""Class that represents a Music Service Album List."""
item_class = 'object.container.albumlist'
valid_fields = [
'id', 'title', 'item_type', 'artist', 'artist_id', 'can_play',
'can_enumerate', 'can_add_to_favorites', 'album_art_uri', 'can_cache'
]
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSAlbumList, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:000d006cplaylistid_26b18dbb-fd35-40bd-8d4f-
# 8669bfc9f712
return self.content['uri']
class MSPlaylist(MusicServiceItem):
"""Class that represents a Music Service Play List."""
item_class = 'object.container.albumlist'
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri', 'artist', 'can_enumerate',
'can_add_to_favorites', 'artist_id']
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSPlaylist, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:000d006cplaylistid_c86ddf26-8ec5-483e-b292-
# abe18848e89e
return self.content['uri']
class MSArtistTracklist(MusicServiceItem):
"""Class that represents a Music Service Artist Track List."""
item_class = 'object.container.playlistContainer.sameArtist'
valid_fields = ['id', 'title', 'item_type', 'can_play', 'album_art_uri']
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'uri', 'description',
'service_id']
def __init__(self, title, item_id, extended_id, uri, description,
service_id, **kwargs):
content = {
'title': title, 'item_id': item_id, 'extended_id': extended_id,
'uri': uri, 'description': description, 'service_id': service_id,
}
content.update(kwargs)
super(MSArtistTracklist, self).__init__(**content)
@property
def uri(self):
"""Return the URI."""
# x-rincon-cpcontainer:100f006cartistpopsongsid_1566
return 'x-rincon-cpcontainer:100f006c{}'.format(self.item_id)
class MSArtist(MusicServiceItem):
"""Class that represents a Music Service Artist."""
valid_fields = [
'username', 'can_add_to_favorites', 'artist', 'title', 'album_art_uri',
'item_type', 'id', 'service_id', 'description', 'can_cache'
]
# Since MSArtist cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
    # fields that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSArtist, self).__init__(**content)
class MSFavorites(MusicServiceItem):
"""Class that represents a Music Service Favorite."""
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri']
# Since MSFavorites cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
    # fields that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSFavorites, self).__init__(**content)
class MSCollection(MusicServiceItem):
"""Class that represents a Music Service Collection."""
valid_fields = ['id', 'item_type', 'title', 'can_play', 'can_cache',
'album_art_uri']
# Since MSCollection cannot produce didl_metadata, they are not strictly
# required, but it makes sense to require them anyway, since they are the
    # fields that describe the item
# IMPORTANT. Keep this list, __init__ args and content in __init__ in sync
required_fields = ['title', 'item_id', 'extended_id', 'service_id']
def __init__(self, title, item_id, extended_id, service_id, **kwargs):
content = {'title': title, 'item_id': item_id,
'extended_id': extended_id, 'service_id': service_id}
content.update(kwargs)
super(MSCollection, self).__init__(**content)
MS_TYPE_TO_CLASS = {'artist': MSArtist, 'album': MSAlbum, 'track': MSTrack,
'albumList': MSAlbumList, 'favorites': MSFavorites,
'collection': MSCollection, 'playlist': MSPlaylist,
'artistTrackList': MSArtistTracklist}
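# MS_TYPE_TO_CLASS maps the item-type strings found in music service
# responses to the wrapper classes defined above.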
| mit | 7,402,956,122,372,239,000 | 37.164855 | 79 | 0.584896 | false |
kuke/models | fluid/PaddleRec/word2vec/preprocess.py | 1 | 5840 | # -*- coding: utf-8 -*
import os
import random
import re
import six
import argparse
import io
import math
prog = re.compile("[^a-z ]", flags=0)
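# Matches any character that is not a lowercase letter or a space;
# text_strip() uses it to delete everything else.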
def parse_args():
parser = argparse.ArgumentParser(
description="Paddle Fluid word2 vector preprocess")
parser.add_argument(
'--build_dict_corpus_dir', type=str, help="The dir of corpus")
parser.add_argument(
'--input_corpus_dir', type=str, help="The dir of input corpus")
parser.add_argument(
'--output_corpus_dir', type=str, help="The dir of output corpus")
parser.add_argument(
'--dict_path',
type=str,
default='./dict',
help="The path of dictionary ")
parser.add_argument(
'--min_count',
type=int,
default=5,
help="If the word count is less then min_count, it will be removed from dict"
)
parser.add_argument(
'--downsample',
type=float,
default=0.001,
help="filter word by downsample")
parser.add_argument(
'--filter_corpus',
action='store_true',
default=False,
help='Filter corpus')
parser.add_argument(
'--build_dict',
action='store_true',
default=False,
help='Build dict from corpus')
return parser.parse_args()
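# Typical invocations (directory and file names are illustrative):
#   python preprocess.py --build_dict --build_dict_corpus_dir corpus/ --dict_path ./dict
#   python preprocess.py --filter_corpus --dict_path ./dict --input_corpus_dir corpus/ --output_corpus_dir converted/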
def text_strip(text):
    # English preprocessing rule
return prog.sub("", text.lower())
# Shameless copy from Tensorflow https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if _is_unicode(s):
return s
try:
return _to_unicode(s)
except UnicodeDecodeError:
res = _to_unicode(s, ignore_errors=True)
return res
def _is_unicode(s):
if six.PY2:
if isinstance(s, unicode):
return True
else:
if isinstance(s, str):
return True
return False
def _to_unicode(s, ignore_errors=False):
if _is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def filter_corpus(args):
"""
filter corpus and convert id.
"""
word_count = dict()
word_to_id_ = dict()
word_all_count = 0
id_counts = []
word_id = 0
#read dict
with io.open(args.dict_path, 'r', encoding='utf-8') as f:
for line in f:
word, count = line.split()[0], int(line.split()[1])
word_count[word] = count
word_to_id_[word] = word_id
word_id += 1
id_counts.append(count)
word_all_count += count
#filter corpus and convert id
if not os.path.exists(args.output_corpus_dir):
os.makedirs(args.output_corpus_dir)
for file in os.listdir(args.input_corpus_dir):
with io.open(args.output_corpus_dir + '/convert_' + file, "w") as wf:
with io.open(
args.input_corpus_dir + '/' + file, encoding='utf-8') as rf:
print(args.input_corpus_dir + '/' + file)
for line in rf:
signal = False
line = text_strip(line)
words = line.split()
for item in words:
if item in word_count:
idx = word_to_id_[item]
else:
idx = word_to_id_[native_to_unicode('<UNK>')]
count_w = id_counts[idx]
corpus_size = word_all_count
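                    # Word2vec-style subsampling (Mikolov et al.): with
                    # f = count_w / corpus_size and t = args.downsample,
                    # keep_prob = (sqrt(f / t) + 1) * t / f, so frequent
                    # words are kept with lower probability.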
keep_prob = (
math.sqrt(count_w /
(args.downsample * corpus_size)) + 1
) * (args.downsample * corpus_size) / count_w
r_value = random.random()
if r_value > keep_prob:
continue
wf.write(_to_unicode(str(idx) + " "))
signal = True
if signal:
wf.write(_to_unicode("\n"))
def build_dict(args):
"""
    Preprocess the data, generate the dictionary and save it into dict_path.
:param corpus_dir: the input data dir.
    :param dict_path: the generated dict path. The data in dict is "word count"
    :param min_count: words with a count <= min_count are folded into <UNK>
:return:
"""
# word to count
word_count = dict()
for file in os.listdir(args.build_dict_corpus_dir):
with io.open(
args.build_dict_corpus_dir + "/" + file, encoding='utf-8') as f:
print("build dict : ", args.build_dict_corpus_dir + "/" + file)
for line in f:
line = text_strip(line)
words = line.split()
for item in words:
if item in word_count:
word_count[item] = word_count[item] + 1
else:
word_count[item] = 1
item_to_remove = []
for item in word_count:
if word_count[item] <= args.min_count:
item_to_remove.append(item)
unk_sum = 0
for item in item_to_remove:
unk_sum += word_count[item]
del word_count[item]
#sort by count
word_count[native_to_unicode('<UNK>')] = unk_sum
word_count = sorted(
word_count.items(), key=lambda word_count: -word_count[1])
with io.open(args.dict_path, 'w+', encoding='utf-8') as f:
for k, v in word_count:
f.write(k + " " + str(v) + '\n')
if __name__ == "__main__":
args = parse_args()
if args.build_dict:
build_dict(args)
elif args.filter_corpus:
filter_corpus(args)
else:
print(
"error command line, please choose --build_dict or --filter_corpus")
| apache-2.0 | -1,929,002,871,442,135,600 | 30.229947 | 134 | 0.52774 | false |
sysuwuhaibin/vatus | vatus/testcases/test_bussiness/test_login_management/test_E73_modify_personal_information.py | 1 | 5396 | # -*- coding: utf-8 -*-
import time
import unittest
from selenium import webdriver
import settings
class ModifyPersonalInfo(unittest.TestCase):
def setUp(self):
self.driver = None
self.base_url = settings.test_parameters.get("bussiness_base_url")
def test_E73_modify_personal_information(self):
web_types = settings.test_parameters.get("web_types")
for web_type in web_types:
if web_type == 'firefox':
self.driver = webdriver.Firefox()
elif web_type == 'chrome':
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(30)
driver = self.driver
driver.get(self.base_url)
driver.maximize_window()
###########################################
        # Precondition: log in to the system
###########################################
driver.find_element_by_id("input_username").clear()
driver.find_element_by_id("input_username").send_keys("admin")
driver.find_element_by_id("input_password").clear()
driver.find_element_by_id("input_password").send_keys("admin123")
driver.find_element_by_id("login_btn").click()
time.sleep(3)
###########################################
        # Step 1: empty email address
###########################################
driver.find_element_by_css_selector("span.user-info").click()
driver.find_element_by_link_text(u"个人信息").click()
time.sleep(3)
driver.find_element_by_id("email").click()
driver.find_element_by_css_selector("span.editable-clear-x").click()
driver.find_element_by_xpath("//button[@type='submit']").click()
self.assertEqual("请输入邮箱地址!", driver.find_element_by_class_name("editable-error-block").text)
time.sleep(2)
###########################################
        # Step 2: email address with invalid length or format
###########################################
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").\
send_keys("123456789012345678901234567890123456789012345678901234567890@1234567890.com")
driver.find_element_by_xpath("//button[@type='submit']").click()
self.assertEqual("长度不超过64个字", driver.find_element_by_class_name("editable-error-block").text)
time.sleep(2)
driver.find_element_by_css_selector("span.editable-clear-x").click()
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("fss@#$%.com")
driver.find_element_by_xpath("//button[@type='submit']").click()
self.assertEqual("邮件格式不正确", driver.find_element_by_class_name("editable-error-block").text)
time.sleep(2)
###########################################
        # Step 3: enter an email address already registered by another user
###########################################
driver.find_element_by_css_selector("span.editable-clear-x").click()
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("[email protected]")
driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(2)
self.assertEqual("邮件地址已被使用", driver.find_element_by_class_name("gritter-without-image").
find_element_by_tag_name("p").text)
time.sleep(8)
###########################################
        # Step 4: enter a valid email address
###########################################
driver.find_element_by_id("email").click()
driver.find_element_by_css_selector("span.editable-clear-x").click()
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("[email protected]")
driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(2)
self.assertEqual("用户 admin 信息修改成功", driver.find_element_by_class_name("gritter-without-image").
find_element_by_tag_name("p").text)
time.sleep(8)
###########################################
        # Postcondition: restore the initial email address so the test can be rerun
###########################################
driver.find_element_by_id("email").click()
driver.find_element_by_css_selector("span.editable-clear-x").click()
driver.find_element_by_css_selector("input.form-control.input-sm").clear()
driver.find_element_by_css_selector("input.form-control.input-sm").send_keys("[email protected]")
driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(2)
driver.quit()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 3,833,711,199,793,544,000 | 53.252632 | 111 | 0.528328 | false |
dycodedev/taiga-back | tests/integration/test_vote_issues.py | 1 | 4147 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# Copyright (C) 2015 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from django.core.urlresolvers import reverse
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_upvote_issue(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
url = reverse("issues-upvote", args=(issue.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
def test_downvote_issue(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
url = reverse("issues-downvote", args=(issue.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
def test_list_issue_voters(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
f.VoteFactory.create(content_object=issue, user=user)
url = reverse("issue-voters-list", args=(issue.id,))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data[0]['id'] == user.id
def test_get_issue_voter(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
vote = f.VoteFactory.create(content_object=issue, user=user)
url = reverse("issue-voters-detail", args=(issue.id, vote.user.id))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['id'] == vote.user.id
def test_get_issue_votes(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
url = reverse("issues-detail", args=(issue.id,))
f.VotesFactory.create(content_object=issue, count=5)
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['total_voters'] == 5
def test_get_issue_is_voted(client):
user = f.UserFactory.create()
issue = f.create_issue(owner=user)
f.MembershipFactory.create(project=issue.project, user=user, is_owner=True)
f.VotesFactory.create(content_object=issue)
url_detail = reverse("issues-detail", args=(issue.id,))
url_upvote = reverse("issues-upvote", args=(issue.id,))
url_downvote = reverse("issues-downvote", args=(issue.id,))
client.login(user)
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['total_voters'] == 0
assert response.data['is_voter'] == False
response = client.post(url_upvote)
assert response.status_code == 200
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['total_voters'] == 1
assert response.data['is_voter'] == True
response = client.post(url_downvote)
assert response.status_code == 200
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['total_voters'] == 0
assert response.data['is_voter'] == False
| agpl-3.0 | 7,353,787,517,842,962,000 | 33.247934 | 79 | 0.70584 | false |
lschule/biknstock | web/python/hello.py | 1 | 2891 | from __future__ import print_function
import os
from operator import itemgetter
from crm114 import Classifier
def index(req):
data_path = '%s%s%s' % (os.path.dirname(__file__), os.sep, 'data')
categories = ['good', 'bad']
print('Initializing classifier')
print('- Categories: %s' % ', '.join(categories))
print('- Data path: %s' % data_path)
c = Classifier(data_path, ['good', 'bad'])
for file_path in c.file_list():
if os.path.exists(file_path):
os.remove(file_path)
print('- Data file: %s' % file_path)
c.create_files()
print('')
c.learn('good', 'this is a good test')
c.learn('good', 'pretty good')
c.learn('good', 'this is a VERY good test')
c.learn('good', 'this is a good test')
c.learn('good', 'this is a great test')
c.learn('good', 'awesome test')
c.learn('good', 'peachy test')
c.learn('good', 'love')
c.learn('good', 'hey')
c.learn('bad', 'a bad test')
c.learn('bad', 'pretty bad test')
c.learn('bad', 'this is a very bad test')
c.learn('bad', 'terrible test')
c.learn('bad', 'this is a treacherous test')
c.learn('bad', 'TREACHERY AT ITS FINEST')
c.learn('bad', 'this is a shit awful test')
c.learn('bad', 'hate')
c.learn('bad', 'HATED') # Case-sensitive? Really?
c.learn('bad', 'made me care-vomit')
c.learn('bad', 'vomit')
classify_texts = ['this is a good test',
'here is a pretty good test',
'this is a bad test',
'this is shit awful',
'this is insanely awesome',
'THIS IS SUCH AN AWESOME TEST',
'I love this test so much.',
'This is the finest test.',
'HATED IT',
"hey baby test, can I get your digits?",
'I wanted to vomit',
'Please, only the finest of your treachery',
'this is a test',
'a treacherous, terrible test, which I hated ' +
'as it made me vomit']
category_max = len(max(categories, key=len))
test_output_format = '%% 3.2f%%%% %%%ds: %%s' % category_max
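    # With the categories 'good'/'bad', test_output_format expands to
    # '% 3.2f%% %4s: %s' (probability, right-aligned category, text).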
test_results = []
for text in classify_texts:
category, probability = c.classify(text)
test_results.append({'category': category,
'probability': probability,
'text': text})
sorted_results = sorted(test_results, key=itemgetter('probability'),
reverse=True)
for test in sorted_results:
print(test_output_format % (test['probability'] * 100.0,
test['category'],
test['text']))
return "Hello World! This is a python script!";
| mit | 4,316,286,691,436,116,500 | 34.691358 | 72 | 0.51643 | false |
sigma-random/Triton | examples/callback_after.py | 1 | 8375 |
# Output
#
# $ ./triton ./examples/callback_after.py ./samples/crackmes/crackme_xor a
# 0x40056d: push rbp
# -> #0 = (bvsub (_ bv140735022953896 64) (_ bv8 64)) ; Aligns stack
# -> #1 = (_ bv140735022953936 64)
# -> #2 = (_ bv4195694 64) ; RIP
#
# 0x40056e: mov rbp, rsp
# -> #3 = ((_ extract 63 0) #0)
# -> #4 = (_ bv4195697 64) ; RIP
#
# 0x400571: mov qword ptr [rbp-0x18], rdi
# -> #5 = (_ bv140735022960969 64)
# -> #6 = (_ bv4195701 64) ; RIP
#
# 0x400575: mov dword ptr [rbp-0x4], 0x0
# -> #7 = (_ bv0 32)
# -> #8 = (_ bv4195708 64) ; RIP
#
# 0x40057c: jmp 0x4005bd
# -> #9 = (_ bv4195773 64) ; RIP
#
# 0x4005bd: cmp dword ptr [rbp-0x4], 0x4
# -> #10 = (bvsub #7 ((_ sign_extend 0) (_ bv4 32)))
# -> #11 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #10 (bvxor #7 ((_ sign_extend 0) (_ bv4 32)))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #12 = (ite (bvult #7 ((_ sign_extend 0) (_ bv4 32))) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #13 = (ite (= ((_ extract 31 31) (bvand (bvxor #7 ((_ sign_extend 0) (_ bv4 32))) (bvxor #7 #10))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #14 = (ite (= (parity_flag ((_ extract 7 0) #10)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #15 = (ite (= ((_ extract 31 31) #10) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #16 = (ite (= #10 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #17 = (_ bv4195777 64) ; RIP
#
# 0x40057e: mov eax, dword ptr [rbp-0x4]
# -> #19 = ((_ extract 31 0) #10)
# -> #20 = (_ bv4195713 64) ; RIP
#
# 0x400581: movsxd rdx, eax
# -> #21 = ((_ sign_extend 32) ((_ extract 31 0) #19))
# -> #22 = (_ bv4195716 64) ; RIP
#
# 0x400584: mov rax, qword ptr [rbp-0x18]
# -> #23 = ((_ extract 63 0) #5)
# -> #24 = (_ bv4195720 64) ; RIP
#
# 0x400588: add rax, rdx
# -> #25 = (bvadd ((_ extract 63 0) #23) ((_ extract 63 0) #21))
# -> #26 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #25 (bvxor ((_ extract 63 0) #23) ((_ extract 63 0) #21))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #27 = (ite (bvult #25 ((_ extract 63 0) #23)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #28 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #23) (bvnot ((_ extract 63 0) #21))) (bvxor ((_ extract 63 0) #23) #25))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #29 = (ite (= (parity_flag ((_ extract 7 0) #25)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #30 = (ite (= ((_ extract 63 63) #25) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #31 = (ite (= #25 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #32 = (_ bv4195723 64) ; RIP
#
# 0x40058b: movzx eax, byte ptr [rax]
# -> #33 = ((_ zero_extend 24) (_ bv97 8))
# -> #34 = (_ bv4195726 64) ; RIP
#
# 0x40058e: movsx eax, al
# -> #35 = ((_ sign_extend 24) ((_ extract 7 0) #33))
# -> #36 = (_ bv4195729 64) ; RIP
#
# 0x400591: sub eax, 0x1
# -> #37 = (bvsub ((_ extract 31 0) #35) (_ bv1 32))
# -> #38 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #37 (bvxor ((_ extract 31 0) #35) (_ bv1 32))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #39 = (ite (bvult ((_ extract 31 0) #35) (_ bv1 32)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #40 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #35) (_ bv1 32)) (bvxor ((_ extract 31 0) #35) #37))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #41 = (ite (= (parity_flag ((_ extract 7 0) #37)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #42 = (ite (= ((_ extract 31 31) #37) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #43 = (ite (= #37 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #44 = (_ bv4195732 64) ; RIP
#
# 0x400594: xor eax, 0x55
# -> #45 = (bvxor ((_ extract 31 0) #37) (_ bv85 32))
# -> #46 = (_ bv0 1) ; Clears carry flag
# -> #47 = (_ bv0 1) ; Clears overflow flag
# -> #48 = (ite (= (parity_flag ((_ extract 7 0) #45)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #49 = (ite (= ((_ extract 31 31) #45) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #50 = (ite (= #45 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #51 = (_ bv4195735 64) ; RIP
#
# 0x400597: mov ecx, eax
# -> #52 = ((_ extract 31 0) #45)
# -> #53 = (_ bv4195737 64) ; RIP
#
# 0x400599: mov rdx, qword ptr [rip+0x200aa0]
# -> #54 = (_ bv4196036 64)
# -> #55 = (_ bv4195744 64) ; RIP
#
# 0x4005a0: mov eax, dword ptr [rbp-0x4]
# -> #56 = ((_ extract 31 0) #10)
# -> #57 = (_ bv4195747 64) ; RIP
#
# 0x4005a3: cdqe
# -> #58 = ((_ sign_extend 32) ((_ extract 31 0) #56))
# -> #59 = (_ bv4195749 64) ; RIP
#
# 0x4005a5: add rax, rdx
# -> #60 = (bvadd ((_ extract 63 0) #58) ((_ extract 63 0) #54))
# -> #61 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #60 (bvxor ((_ extract 63 0) #58) ((_ extract 63 0) #54))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #62 = (ite (bvult #60 ((_ extract 63 0) #58)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #63 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #58) (bvnot ((_ extract 63 0) #54))) (bvxor ((_ extract 63 0) #58) #60))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #64 = (ite (= (parity_flag ((_ extract 7 0) #60)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #65 = (ite (= ((_ extract 63 63) #60) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #66 = (ite (= #60 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #67 = (_ bv4195752 64) ; RIP
#
# 0x4005a8: movzx eax, byte ptr [rax]
# -> #68 = ((_ zero_extend 24) (_ bv49 8))
# -> #69 = (_ bv4195755 64) ; RIP
#
# 0x4005ab: movsx eax, al
# -> #70 = ((_ sign_extend 24) ((_ extract 7 0) #68))
# -> #71 = (_ bv4195758 64) ; RIP
#
# 0x4005ae: cmp ecx, eax
# -> #72 = (bvsub ((_ extract 31 0) #52) ((_ extract 31 0) #70))
# -> #73 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #72 (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #74 = (ite (bvult ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #75 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (bvxor ((_ extract 31 0) #52) #72))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #76 = (ite (= (parity_flag ((_ extract 7 0) #72)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #77 = (ite (= ((_ extract 31 31) #72) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #78 = (ite (= #72 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #79 = (_ bv4195760 64) ; RIP
#
# 0x4005b0: jz 0x4005b9
# -> #80 = (ite (= #78 (_ bv1 1)) (_ bv4195769 64) (_ bv4195762 64)) ; RIP
#
# 0x4005b2: mov eax, 0x1
# -> #81 = (_ bv1 32)
# -> #82 = (_ bv4195767 64) ; RIP
#
# 0x4005b7: jmp 0x4005c8
# -> #83 = (_ bv4195784 64) ; RIP
#
# 0x4005c8: pop rbp
# -> #84 = #1
# -> #85 = (bvadd #0 (_ bv8 64)) ; Aligns stack
# -> #86 = (_ bv4195785 64) ; RIP
#
# loose
# $
from triton import *
# A callback must be a function with one argument. This argument is
# always the Instruction class and contains all information
def my_callback_after(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicExpressions:
print '\t -> #%d = %s %s' %(se.getId(), se.getAst(), (('; ' + se.getComment()) if se.getComment() is not None else ''))
print
if __name__ == '__main__':
# Start the symbolic analysis from the 'check' function
startAnalysisFromSymbol('check')
# Add a callback.
# BEFORE: Add the callback before the instruction processing
# AFTER: Add the callback after the instruction processing
# FINI: Add the callback at the end of the execution
addCallback(my_callback_after, IDREF.CALLBACK.AFTER)
# Run the instrumentation - Never returns
runProgram()
| lgpl-3.0 | 4,867,103,924,392,754,000 | 47.410405 | 200 | 0.469373 | false |
lelit/tailor | vcpx/tests/svn.py | 1 | 13407 | # -*- mode: python; coding: utf-8 -*-
# :Progetto: vcpx -- svn specific tests
# :Creato: gio 11 nov 2004 19:09:06 CET
# :Autore: Lele Gaifax <[email protected]>
# :Licenza: GNU General Public License
#
from unittest import TestCase
from datetime import datetime
from vcpx.repository.svn import changesets_from_svnlog
from vcpx.tzinfo import UTC
class FakeLogger:
def warning(self, *args):
pass
debug = warning
class FakeRepository:
def __init__(self, repo, module):
self.repository = repo
self.module = module
self.log = FakeLogger()
FR = FakeRepository
class SvnLogParser(TestCase):
"""Ensure the svn log parser does its job"""
def getSvnLog(self, testname):
from os.path import join, split
logname = join(split(__file__)[0], 'data', testname)+'.log'
return file(logname)
def testRenameBehaviour(self):
"""Verify svn log parser behaves correctly on renames"""
log = self.getSvnLog('svn-simple_rename_test')
csets = changesets_from_svnlog(log, FR('file:///tmp/t/repo', '/trunk'))
cset = csets.next()
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2004,11,12,15,05,37,134366,UTC))
self.assertEqual(cset.log, 'create tree')
self.assertEqual(len(cset.entries), 2)
entry = cset.entries[0]
self.assertEqual(entry.name, 'dir')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'dir/a.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
cset = csets.next()
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2004,11,12,15,06,04,193650,UTC))
self.assertEqual(cset.log, 'rename dir')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'new')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'dir')
self.assertRaises(StopIteration, csets.next)
def testRenameOutBehaviour(self):
"""Verify svn log parser behaves correctly on renames out of scope"""
log = self.getSvnLog('svn-rename_out_test')
csets = changesets_from_svnlog(log,
FR('http://srv/svn/Shtoom', '/trunk'))
cset = csets.next()
self.assertEqual(cset.author, 'anthony')
self.assertEqual(cset.date, datetime(2004,11,9,6,54,20,709243,UTC))
self.assertEqual(cset.log, 'Moving to a /sandbox\n')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'shtoom/tmp')
self.assertEqual(entry.action_kind, entry.DELETED)
self.assertRaises(StopIteration, csets.next)
def testCopyAndRename(self):
"""Verify svn log parser behaves correctly on copies"""
log = self.getSvnLog('svn-copy_and_rename_test')
csets = list(changesets_from_svnlog(log,
FR('file:///tmp/rep', '/test')))
self.assertEqual(len(csets), 4)
cset = csets[1]
self.assertEqual(cset.author, 'lele')
self.assertEqual(cset.date, datetime(2005,1,8, 17,36,55,174757,UTC))
self.assertEqual(cset.log, 'Copy')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file2.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
self.assertEqual(entry.old_name, 'file1.txt')
cset = csets[2]
self.assertEqual(cset.date, datetime(2005,1,8, 17,42,41,347315,UTC))
self.assertEqual(cset.log, 'Remove')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file1.txt')
self.assertEqual(entry.action_kind, entry.DELETED)
cset = csets[3]
self.assertEqual(cset.date, datetime(2005,1,8, 17,43,9,909127,UTC))
self.assertEqual(cset.log, 'Move')
self.assertEqual(len(cset.entries), 1)
entry = cset.entries[0]
self.assertEqual(entry.name, 'file1.txt')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'file2.txt')
def testREvent(self):
"""Verify how tailor handle svn "R" event"""
log = self.getSvnLog('svn-svn_r_event_test')
csets = changesets_from_svnlog(log, FR('file:///tmp/rep', '/trunk'))
cset = csets.next()
cset = csets.next()
self.assertEqual(cset.author, 'cmlenz')
self.assertEqual(cset.date, datetime(2005,3,21, 8,34, 2,522947,UTC))
self.assertEqual(len(cset.entries), 7)
entry = cset.entries[0]
self.assertEqual(entry.name, 'setup.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'trac/scripts')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'trac/scripts/__init__.py')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'trac/scripts/admin.py')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'scripts/trac-admin')
entry = cset.entries[4]
self.assertEqual(entry.name, 'trac/tests/environment.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[6]
self.assertEqual(entry.name, 'scripts/trac-admin')
self.assertEqual(entry.action_kind, entry.ADDED)
self.assertRaises(StopIteration, csets.next)
def testRenameReplace(self):
"""Verify how tailor handle svn "R" event on renames"""
log = self.getSvnLog('svn-rename_replace')
csets = changesets_from_svnlog(log, FR('file:///tmp/rep',
'/cedar-backup2/trunk'))
cset = csets.next()
self.assertEqual(len(cset.entries), 7)
for entry, expected in map(None, cset.entries,
(('Makefile', 'UPD'),
('test', 'REN', 'unittest'),
('test/__init__.py', 'ADD'),
('test/filesystemtests.py', 'ADD'),
('test/knapsacktests.py', 'ADD'),
('util/createtree.py', 'UPD'),
('test/data', 'REN', 'unittest/data'))):
self.assertEqual(entry.name, expected[0])
self.assertEqual(entry.action_kind, expected[1],
msg=entry.name+': got %r, expected %r' %
(entry.action_kind, expected[1]))
if expected[1]=='REN':
self.assertEqual(entry.old_name, expected[2],
msg=entry.name+': got %r, expected %r' %
(entry.old_name, expected[2]))
def testTrackingRoot(self):
"""Verify we are able to track the root of the repository"""
log = self.getSvnLog('svn-svn_repos_root_test')
csets = list(changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/')))
self.assertEqual(len(csets), 4)
cset = csets[1]
self.assertEqual(len(cset.entries), 3)
entry = cset.entries[0]
self.assertEqual(entry.name, 'branches/branch-a')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'branches/branch-a/a.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'branches/branch-a/b.txt')
self.assertEqual(entry.action_kind, entry.ADDED)
def testPydistStrangeCase(self):
"""Verify we are able to groke with svn 'R' strangeness"""
log = self.getSvnLog('svn-pydist_strange_case')
csets = changesets_from_svnlog(log, FR('http://srv/svn', '/py/dist'))
cset = csets.next()
self.assertEqual(len(cset.entries), 3)
entry = cset.entries[0]
self.assertEqual(entry.name, 'py/documentation/example')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'example')
entry = cset.entries[1]
self.assertEqual(entry.name, 'py/documentation/test.txt')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'py/documentation/example/test')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'example/test')
self.assertRaises(StopIteration, csets.next)
def testUnicode(self):
"""Verify svn parser returns unicode strings"""
log = self.getSvnLog('svn-encoding_test')
csets = changesets_from_svnlog(log, FR('http://srv/plone/CMFPlone',
'/branches/2.1'))
log = csets.next().log
self.assertEqual(type(log), type(u'€'))
self.assertEqual(len(log), 91)
self.assertRaises(UnicodeEncodeError, log.encode, 'iso-8859-1')
self.assertEqual(len(log.encode('ascii', 'ignore')), 90)
self.assertRaises(StopIteration, csets.next)
def testCopyAndReplace(self):
"""Verify the svn parser handle copy+replace"""
log = self.getSvnLog('svn-copy_and_replace_test')
csets = changesets_from_svnlog(log,
FR('http://srv/repos/trac', '/trunk'))
cset = csets.next()
self.assertEqual(len(cset.entries), 7)
entry = cset.entries[0]
self.assertEqual(entry.name, 'setup.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'trac/scripts')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'trac/scripts/__init__.py')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'trac/scripts/admin.py')
self.assertEqual(entry.action_kind, entry.RENAMED)
self.assertEqual(entry.old_name, 'scripts/trac-admin')
entry = cset.entries[4]
self.assertEqual(entry.name, 'trac/tests/environment.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[5]
self.assertEqual(entry.name, 'trac/tests/tracadmin.py')
self.assertEqual(entry.action_kind, entry.UPDATED)
entry = cset.entries[6]
self.assertEqual(entry.name, 'scripts/trac-admin')
self.assertEqual(entry.action_kind, entry.ADDED)
def testCopyFromAndRemove(self):
"""Verify the svn parser handle copyfrom+remove"""
log = self.getSvnLog('svn-copyfrom_and_remove_test')
csets = changesets_from_svnlog(log, FR('http://srv/samba',
'/branches/SAMBA_4_0'))
cset = csets.next()
self.assertEqual(len(cset.entries), 4)
entry = cset.entries[0]
self.assertEqual(entry.name, 'source/nsswitch')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[1]
self.assertEqual(entry.name, 'source/nsswitch/config.m4')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[2]
self.assertEqual(entry.name, 'source/nsswitch/wb_common.c')
self.assertEqual(entry.action_kind, entry.ADDED)
entry = cset.entries[3]
self.assertEqual(entry.name, 'source/nsswitch/wins.c')
self.assertEqual(entry.action_kind, entry.DELETED)
def testIncrementalParser(self):
"""Verify that the svn log parser is effectively incremental"""
log = self.getSvnLog('svn-svn_repos_root_test')
csets = list(changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/'),
chunksize=100))
self.assertEqual(len(csets), 4)
def testExternalCopies(self):
"""Verify that external copies+deletions are handled ok"""
log = self.getSvnLog('svn-external_copies_test')
csets = changesets_from_svnlog(log,
FR('svn+ssh://caia/tmp/svn', '/trunk'))
cset = csets.next()
cset = csets.next()
self.assertEqual(len(cset.entries), 5)
entry = cset.removedEntries()[0]
self.assertEqual(entry.name, 'README_LOGIN')
cset = csets.next()
self.assertEqual(len(cset.entries), 5)
def testCollidingNames(self):
"""Verify svn log parser behaves correctly with colliding names"""
# Sorry, couldn't find a better name
log = self.getSvnLog('svn-colliding_names_test')
csets = changesets_from_svnlog(log,
FR('svn://ixion.tartarus.org/main', '/putty'))
cset = csets.next()
self.assertEqual(len(cset.entries), 1)
| gpl-3.0 | 3,799,975,612,509,294,000 | 36.54902 | 85 | 0.596121 | false |
peterhinch/micropython-epaper | epaper.py | 1 | 19329 | # epaper.py main module for Embedded Artists' 2.7 inch E-paper Display.
# Peter Hinch
# version 0.9
# 17 Jun 2018 Adapted for VFS mount/unmount.
# 18 Mar 2016 Adafruit module and fast (partial) updates.
# 2 Mar 2016 Power control support removed. Support for fonts as persistent byte code
# 29th Jan 2016 Monospaced fonts supported.
# Copyright 2015 Peter Hinch
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
# Code translated and developed from https://developer.mbed.org/users/dreschpe/code/EaEpaper/
import pyb, gc, uos
from panel import NORMAL, FAST, EMBEDDED_ARTISTS, ADAFRUIT
LINES_PER_DISPLAY = const(176) # 2.7 inch panel only!
BYTES_PER_LINE = const(33)
BITS_PER_LINE = const(264)
gc.collect()
NEWLINE = const(10) # ord('\n')
class EPDError(OSError):
pass
def checkstate(state, msg):
if not state:
raise EPDError(msg)
# Generator parses an XBM file returning width, height, followed by data bytes
def get_xbm_data(sourcefile):
errmsg = ''.join(("File: '", sourcefile, "' is not a valid XBM file"))
try:
with open(sourcefile, 'r') as f:
phase = 0
for line in f:
if phase < 2:
if line.startswith('#define'):
yield int(line.split(' ')[-1])
phase += 1
if phase == 2:
start = line.find('{')
if start >= 0:
line = line[start +1:]
phase += 1
if phase == 3:
if not line.isspace():
phase += 1
if phase == 4:
end = line.find('}')
if end >=0 :
line = line[:end]
phase += 1
hexnums = line.split(',')
if hexnums[0] != '':
for hexnum in [q for q in hexnums if not q.isspace()]:
yield int(hexnum, 16)
if phase != 5 :
print(errmsg)
except OSError:
print("Can't open " + sourcefile + " for reading")
class FontFileError(Exception):
pass
class Font(object):
def __init__(self):
self.bytes_per_ch = 0 # Number of bytes to define a character
self.bytes_horiz = 0 # No. of bytes per character row
        self.bits_horiz = 0 # Horizontal bits in character matrix
self.bits_vert = 0 # Vertical bits in character matrix
self.monospaced = False # Default is variable width
self.exists = False
self.modfont = None
self.fontfilename = None
self.fontfile = None
    # monospaced only applies to binary files. Since these lack an index
    # (FIXME), characters are saved in fixed pitch with width data, hence
    # can be rendered as fixed or variable pitch.
# Python fonts are saved as variable or fixed pitch depending on the -f arg.
# The monospaced flag saved with the file enables the renderer to
# determine the correct x advance.
def __call__(self, fontfilename, monospaced = False):
self.fontfilename = fontfilename
self.monospaced = monospaced
return self
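    # Typical use, via the Display instance's font attribute (file name is
    # illustrative):
    #     with display.font('fonts/arial25.fnt', monospaced=True):
    #         display.puts('hello')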
def __enter__(self): #fopen(self, fontfile):
if isinstance(self.fontfilename, type(uos)): # Using a Python font
self.fontfile = None
f = self.fontfilename
ok = False
try:
ok = f.hmap() and f.reverse()
except AttributeError:
pass
if not ok:
raise FontFileError('Font module {} is invalid'.format(f.__name__))
self.monospaced = f.monospaced()
self.modfont = f
self.bits_horiz = f.max_width()
self.bits_vert = f.height()
else:
self.modfont = None
try:
f = open(self.fontfilename, 'rb')
except OSError as err:
raise FontFileError(err)
self.fontfile = f
header = f.read(4)
if header[0] == 0x42 and header[1] == 0xe7:
self.bits_horiz = header[2] # font[1]
self.bits_vert = header[3] # font[2]
else:
raise FontFileError('Font file {} is invalid'.format(self.fontfilename))
self.bytes_horiz = (self.bits_horiz + 7) // 8
self.bytes_per_ch = self.bytes_horiz * self.bits_vert
self.exists = True
return self
def __exit__(self, *_):
self.exists = False
if self.fontfile is not None:
self.fontfile.close()
class Display(object):
FONT_HEADER_LENGTH = 4
def __init__(self, side='L',*, mode=NORMAL, model=EMBEDDED_ARTISTS, use_flash=False, up_time=None):
self.flash = None # Assume flash is unused
self.in_context = False
try:
intside = {'l':0, 'r':1}[side.lower()]
except (KeyError, AttributeError):
raise ValueError("Side must be 'L' or 'R'")
if model not in (EMBEDDED_ARTISTS, ADAFRUIT):
raise ValueError('Unsupported model')
if mode == FAST and use_flash:
raise ValueError('Flash memory unavailable in fast mode')
if mode == NORMAL and up_time is not None:
raise ValueError('Cannot set up_time in normal mode')
if mode == NORMAL:
from epd import EPD
self.epd = EPD(intside, model)
elif mode == FAST:
from epdpart import EPD
self.epd = EPD(intside, model, up_time)
else:
raise ValueError('Unsupported mode {}'.format(mode))
self.mode = mode
self.font = Font()
gc.collect()
self.locate(0, 0) # Text cursor: default top left
self.mounted = False # umountflash() not to sync
if use_flash:
from flash import FlashClass
gc.collect()
self.flash = FlashClass(intside)
self.umountflash() # In case mounted by prior tests.
self.mountflash()
gc.collect()
def checkcm(self):
if not (self.mode == NORMAL or self.in_context):
raise EPDError('Fast mode must be run using a context manager')
def __enter__(self): # Power up
checkstate(self.mode == FAST, "In normal mode, can't use context manager")
self.in_context = True
self.epd.enter()
return self
def __exit__(self, *_): # shut down
self.in_context = False
self.epd.exit()
def mountflash(self):
if self.flash is None: # Not being used
return
self.flash.begin() # Initialise.
vfs = uos.VfsFat(self.flash) # Instantiate FAT filesystem
uos.mount(vfs, self.flash.mountpoint)
self.mounted = True
def umountflash(self): # Unmount flash
if self.flash is None:
return
if self.mounted:
self.flash.synchronise()
try:
uos.umount(self.flash.mountpoint)
except OSError:
pass # Don't care if it wasn't mounted
self.flash.end() # Shut down
self.mounted = False # flag unmounted to prevent spurious syncs
def show(self):
self.checkcm()
self.umountflash() # sync, umount flash, shut it down and disable SPI
if self.mode == NORMAL: # EPD functions which access the display electronics must be
with self.epd as epd: # called from a with block to ensure proper startup & shutdown
epd.showdata()
else: # Fast mode: already in context manager
self.epd.showdata()
self.mountflash()
def clear_screen(self, show=True, both=False):
self.checkcm()
self.locate(0, 0) # Reset text cursor
self.epd.clear_data(both)
if show:
if self.mode == NORMAL:
self.show()
else:
self.epd.EPD_clear()
def refresh(self, fast =True): # Fast mode only functions
checkstate(self.mode == FAST, 'refresh() invalid in normal mode')
self.checkcm()
self.epd.refresh(fast)
def exchange(self, clear_data):
checkstate(self.mode == FAST, 'exchange() invalid in normal mode')
self.checkcm()
self.epd.exchange(clear_data)
@property
def temperature(self): # return temperature as integer in Celsius
return self.epd.temperature
@property
def location(self):
return self.char_x, self.char_y
@micropython.native
def setpixel(self, x, y, black): # 41uS. Clips to borders. x, y must be integer
if y < 0 or y >= LINES_PER_DISPLAY or x < 0 or x >= BITS_PER_LINE :
return
image = self.epd.image
omask = 1 << (x & 0x07)
index = (x >> 3) + y *BYTES_PER_LINE
if black:
image[index] |= omask
else:
image[index] &= (omask ^ 0xff)
@micropython.viper
def setpixelfast(self, x: int, y: int, black: int): # 27uS. Caller checks bounds
image = ptr8(self.epd.image)
omask = 1 << (x & 0x07)
index = (x >> 3) + y * 33 #BYTES_PER_LINE
if black:
image[index] |= omask
else:
image[index] &= (omask ^ 0xff)
# ****** Simple graphics support ******
    def _line(self, x0, y0, x1, y1, black = True): # Single-pixel line (Bresenham)
dx = x1 -x0
dy = y1 -y0
dx_sym = 1 if dx > 0 else -1
dy_sym = 1 if dy > 0 else -1
dx = dx_sym*dx
dy = dy_sym*dy
dx_x2 = dx*2
dy_x2 = dy*2
if (dx >= dy):
di = dy_x2 - dx
while (x0 != x1):
self.setpixel(x0, y0, black)
x0 += dx_sym
if (di<0):
di += dy_x2
else :
di += dy_x2 - dx_x2
y0 += dy_sym
self.setpixel(x0, y0, black)
else:
di = dx_x2 - dy
while (y0 != y1):
self.setpixel(x0, y0, black)
y0 += dy_sym
if (di < 0):
di += dx_x2
else:
di += dx_x2 - dy_x2
x0 += dx_sym
self.setpixel(x0, y0, black)
def line(self, x0, y0, x1, y1, width =1, black = True): # Draw line
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
if abs(x1 - x0) > abs(y1 - y0): # < 45 degrees
for w in range(-width//2 +1, width//2 +1):
self._line(x0, y0 +w, x1, y1 +w, black)
else:
for w in range(-width//2 +1, width//2 +1):
self._line(x0 +w, y0, x1 +w, y1, black)
def _rect(self, x0, y0, x1, y1, black): # Draw rectangle
self.line(x0, y0, x1, y0, 1, black)
self.line(x0, y0, x0, y1, 1, black)
self.line(x0, y1, x1, y1, 1, black)
self.line(x1, y0, x1, y1, 1, black)
def rect(self, x0, y0, x1, y1, width =1, black = True): # Draw rectangle
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
x0, x1 = (x0, x1) if x1 > x0 else (x1, x0) # x0, y0 is top left, x1, y1 is bottom right
y0, y1 = (y0, y1) if y1 > y0 else (y1, y0)
for w in range(width):
self._rect(x0 +w, y0 +w, x1 -w, y1 -w, black)
def fillrect(self, x0, y0, x1, y1, black = True): # Draw filled rectangle
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
x0, x1 = (x0, x1) if x1 > x0 else (x1, x0)
y0, y1 = (y0, y1) if y1 > y0 else (y1, y0)
for x in range(x0, x1):
for y in range(y0, y1):
self.setpixel(x, y, black)
def _circle(self, x0, y0, r, black = True): # Single pixel circle
x = -r
y = 0
err = 2 -2*r
while x <= 0:
self.setpixel(x0 -x, y0 +y, black)
self.setpixel(x0 +x, y0 +y, black)
self.setpixel(x0 +x, y0 -y, black)
self.setpixel(x0 -x, y0 -y, black)
e2 = err
if (e2 <= y):
y += 1
err += y*2 +1
if (-x == y and e2 <= x):
e2 = 0
if (e2 > x):
x += 1
err += x*2 +1
def circle(self, x0, y0, r, width =1, black = True): # Draw circle
x0, y0, r = int(x0), int(y0), int(r)
for r in range(r, r -width, -1):
self._circle(x0, y0, r, black)
def fillcircle(self, x0, y0, r, black = True): # Draw filled circle
x0, y0, r = int(x0), int(y0), int(r)
x = -r
y = 0
err = 2 -2*r
while x <= 0:
self._line(x0 -x, y0 -y, x0 -x, y0 +y, black)
self._line(x0 +x, y0 -y, x0 +x, y0 +y, black)
e2 = err
if (e2 <= y):
y +=1
err += y*2 +1
if (-x == y and e2 <= x):
e2 = 0
if (e2 > x):
x += 1
err += x*2 +1
# ****** Image display ******
def load_xbm(self, sourcefile, x = 0, y = 0):
g = get_xbm_data(sourcefile)
width = next(g)
height = next(g)
self.loadgfx(g, width, height, x, y)
# Load a rectangular region with a bitmap supplied by a generator.
def loadgfx(self, gen, width, height, x0, y0):
byteoffset = x0 >> 3
bitshift = x0 & 7 # Offset of image relative to byte boundary
bytes_per_line = width >> 3
if width & 7 > 0:
bytes_per_line += 1
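        # bitshift is the sub-byte x offset: when non-zero, each source byte
        # straddles two destination bytes and is split with the masks below.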
for line in range(height):
y = y0 + line
if y >= LINES_PER_DISPLAY:
break
index = y * BYTES_PER_LINE + byteoffset
bitsleft = width
x = x0
for byte in range(bytes_per_line):
val = next(gen)
bits_to_write = min(bitsleft, 8)
x += bits_to_write
if x <= BITS_PER_LINE:
if bitshift == 0 and bits_to_write == 8:
self.epd.image[index] = val
index += 1
else:
mask = ((1 << bitshift) -1) # Bits in current byte to preserve
bitsused = bitshift + bits_to_write
overflow = max(0, bitsused -8)
underflow = max(0, 8 -bitsused)
if underflow: # Underflow in current byte
mask = (mask | ~((1 << bitsused) -1)) & 0xff
nmask = ~mask & 0xff # Bits to overwrite
self.epd.image[index] = (self.epd.image[index] & mask) | ((val << bitshift) & nmask)
index += 1
if overflow : # Bits to write to next byte
mask = ~((1 << overflow) -1) & 0xff # Preserve
self.epd.image[index] = (self.epd.image[index] & mask) | (val >> (8 - bitshift))
bitsleft -= bits_to_write
# ****** Text support ******
def locate(self, x, y): # set cursor position
self.char_x = x # Text input cursor to (x, y)
self.char_y = y
# font.bytes_horiz
    # In case of a font file it's the physical width of every character as stored in the file
# In case of Python font it's the value of max_width converted to bytes
def _character(self, c, usefile):
font = self.font # Cache for speed
bits_vert = font.bits_vert
if usefile:
ff = font.fontfile
ff.seek(self.FONT_HEADER_LENGTH + (c -32) * (font.bytes_per_ch + 1))
buf = ff.read(font.bytes_per_ch + 1)
# Characters are stored as constant width.
bytes_horiz = font.bytes_horiz # No. of bytes before next row
# Advance = bits_horiz if variable pitch else font.bits_horiz
bits_horiz = buf[0]
offset = 1
else:
modfont = font.modfont
buf, height, bits_horiz = modfont.get_ch(chr(c))
# Width varies between characters
bytes_horiz = (bits_horiz + 7) // 8
offset = 0
# Sanity checks: prevent index errors. Wrapping should be done at string/word level.
if (self.char_x + bytes_horiz * 8) > BITS_PER_LINE :
self.char_x = 0
self.char_y += bits_vert
if self.char_y >= (LINES_PER_DISPLAY - bits_vert):
self.char_y = 0
image = self.epd.image
y = self.char_y # x, y are pixel coordinates
for bit_vert in range(bits_vert): # for each vertical line
x = self.char_x
for byte_horiz in range(bytes_horiz):
fontbyte = buf[bit_vert * bytes_horiz + byte_horiz + offset]
index = (x >> 3) + y * BYTES_PER_LINE
nbits = x & 0x07
if nbits == 0:
image[index] = fontbyte
else:
image[index] &= (0xff >> (8 - nbits))
image[index] |= (fontbyte << nbits)
image[index + 1] &= (0xff << nbits)
image[index + 1] |= (fontbyte >> (8 - nbits))
x += 8
y += 1
self.char_x += font.bits_horiz if font.monospaced else bits_horiz
def _putc(self, value, usefile): # print char
if (value == NEWLINE):
self.char_x = 0
self.char_y += self.font.bits_vert
if (self.char_y >= LINES_PER_DISPLAY - self.font.bits_vert):
self.char_y = 0
else:
self._character(value, usefile)
return value
def puts(self, s): # Output a string at cursor
if self.font.exists:
if self.font.modfont is None: # No font module: using binary file
for char in s:
c = ord(char)
if (c > 31 and c < 127) or c == NEWLINE:
self._putc(c, True)
else: # Python font file is self-checking
for char in s:
self._putc(ord(char), False)
else:
raise FontFileError("There is no current font")
| apache-2.0 | 1,606,973,317,774,428,700 | 37.735471 | 110 | 0.496197 | false |
PyBro-JHU/Clue-Less | clueless/tests/client/game_play_functest.py | 1 | 6596 | """
This module is a functional test of the game_server and game_client
interactions. A game server is spun up in a separate process space and bound
to localhost on port 5000. The game client is then used to interact with
the game server.
"""
import unittest
from multiprocessing.process import Process
from clueless.client import errors
from clueless.client.game_play import GameClient
from clueless import log
from clueless.model import game_state
from clueless.server.app import start_server
import time
_LOG = log.get_logger(__name__)
class WhenFunctionalTestingGameClient(unittest.TestCase):
def setUp(self):
        # Set up the game server to run on a separate process
self.game_server = Process(target=start_server)
self.game_server.start()
#create the game client
self.client = GameClient(host="127.0.0.1", port="5000")
self.player_one = "Arthur"
self.player_one_suspect = game_state.PEACOCK
self.player_two = "Steven"
self.player_two_suspect = game_state.PLUM
def test_game_client(self):
try:
#give the game server process a chance to start
time.sleep(3)
#test registering players and choosing suspects
self.client.register_player(self.player_one)
self.client.choose_suspect(
self.player_one, self.player_one_suspect)
self.client.register_player(
self.player_two)
self.client.choose_suspect(
self.player_two, self.player_two_suspect)
            # Retrieve the registered players with the client and validate the
#return values
players = self.client.get_players()
for player in players:
self.assertIsInstance(player, game_state.Player)
self.assertTrue(
self.player_one in [player.username
for player in players])
self.assertTrue(
self.player_two in [player.username
for player in players])
self.assertTrue(
self.player_one_suspect in [player.suspect
for player in players])
self.assertTrue(
self.player_two_suspect in [player.suspect
for player in players])
#start a new game with the client and validate a GameState object
#is returned
game = self.client.start_new_game()
self.assertTrue(game, game_state.GameState)
game = self.client.get_game_state(game.game_id)
self.assertTrue(game, game_state.GameState)
#move player 1 from start space to hallway
player = game.current_player
player_1_current_space = game.game_board[player.suspect]
move_space = player_1_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
player_1_current_space = game.game_board[move_space]
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
#move player 2 from start space to hallway
player = game.current_player
player_2_current_space = game.game_board[player.suspect]
move_space = player_2_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
player_2_current_space = game.game_board[move_space]
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
#move player 1 from hallway to room
player = game.current_player
move_space = player_1_current_space.connected_spaces[0]
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_SUGGESTION)
#make suggestion based on room player is currently in
game = self.client.make_suggestion(
player.username, game_state.MUSTARD,
game_state.REVOLVER,
move_space
)
#if there is a player that can prove the suggestion false
#then test the suggestion response
if game.suggestion_response_player:
with self.assertRaises(errors.GameClientException):
game = self.client.move_player(
player.username, player.suspect, move_space)
self.assertEqual(
game.turn_status, game_state.AWAITING_SUGGESTION_RESPONSE)
response_player = game.suggestion_response_player
suggestion = game.current_suggestion
gamecard_item = list(
{suggestion.weapon, suggestion.room, suggestion.suspect}
&
set(card.item for card in response_player.game_cards))[0]
game = self.client.make_suggestion_response(
response_player.username, gamecard_item)
self.assertEqual(
game.turn_status, game_state.AWAITING_ACCUSATION_OR_END_TURN)
game = self.client.end_turn(player.username)
self.assertEqual(game.turn_status, game_state.AWAITING_MOVE)
last_player = player
player = game.current_player
self.assertNotEqual(player.username, last_player.username)
#test accusation
suspect = [
card.item for card in game.case_file
if card.type == game_state.SUSPECT
][0]
weapon = [
card.item for card in game.case_file
if card.type == game_state.WEAPON
][0]
room = [
card.item for card in game.case_file
if card.type == game_state.ROOM
][0]
game = self.client.make_accusation(
player.username, suspect, weapon, room)
for message in game.player_messages:
                print(message)
self.client.destroy_game(game.game_id)
finally:
self.game_server.terminate()
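# Sketch (not in the original module): allow running this functional test
# directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()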
| apache-2.0 | 7,321,740,515,994,931,000 | 38.73494 | 78 | 0.59339 | false |
ecreall/nova-ideo | novaideo/utilities/alerts_utility.py | 1 | 9428 | # -*- coding: utf8 -*-
# Copyright (c) 2015 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import json
import requests
import re
from persistent.list import PersistentList
# from urllib.request import urlopen
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from pyramid_sms.utils import normalize_us_phone_number
from pyramid_sms.outgoing import send_sms
from dace.objectofcollaboration.principal.util import get_current
import html_diff_wrapper
from novaideo.ips.mailer import mailer_send
# from novaideo.content.resources import (
# arango_server, create_collection)
from novaideo.content.alert import INTERNAL_ALERTS
from novaideo.utilities.util import connect
from novaideo.content.comment import Comment, Commentable
from novaideo import log, _
# SLACK_CHANNELS = {
# 'id': {'url': 'url',
# 'name': 'name'}
# }
# def alert_slack(senders=[], recipients=[], **kwargs):
# """
# recipients: ['improve', 'questionnaire']
# """
# for recipient in recipients:
# channel_data = SLACK_CHANNELS[recipient]
# kwargs['channel'] = "#" + channel_data['name']
# kwargs['username'] = 'webhookbot'
# kwargs = 'payload=' + json.dumps(kwargs)
# url = channel_data['url']
# urlopen(url, kwargs.encode())
# def alert_arango(senders=[], recipients=[], **kwargs):
# """
# recipients: ['creationculturelle.improve']
# """
# for recipient in recipients:
# recipient_parts = recipient.split('.')
# db_id = recipient_parts[0]
# collection_id = recipient_parts[1]
# db = arango_server.db(db_id)
# if db:
# collection = create_collection(db, collection_id)
# collection.create_document(kwargs)
def get_user_data(user, id, request=None):
if not isinstance(user, str):
if not request:
request = get_current_request()
localizer = request.localizer
user_title = getattr(user, 'user_title', '')
user_title = localizer.translate(_(user_title)) \
if user_title else ''
return {
id+'_title': user_title,
id+'_last_name': getattr(user, 'last_name', ''),
id+'_first_name': getattr(user, 'first_name', ''),
}
return {
id+'_title': '',
id+'_last_name': '',
id+'_first_name': '',
}
def get_entity_data(entity, id, request=None):
if not request:
request = get_current_request()
def default_presentation_text(nb_characters=400):
return getattr(entity, 'description', "")[:nb_characters]+'...'
def default_get_url(request):
        return request.resource_url(entity, '@@index')
title = "The " + entity.__class__.__name__.lower()
entity_type = request.localizer.translate(_(title))
return {
id+'_title': getattr(entity, 'title', ''),
id+'_content': getattr(
entity, 'presentation_text', default_presentation_text)(),
id+'_url': getattr(
entity, 'get_url', default_get_url)(request),
id+'_oid': get_oid(entity, 'None'),
id+'_type': entity_type,
id+'_icon': getattr(entity, 'icon', ''),
}
def alert_comment_nia(context, request, root, **kwargs):
nia = root['principals']['users'].get('nia', None)
channel = context.channel
kind = kwargs.pop('internal_kind', None)
alert_class = INTERNAL_ALERTS.get(kind, None)
if nia and channel and alert_class:
# For Nia the alert is volatil
alert = alert_class(**kwargs)
alert.subject = context
comment_text = alert.render('nia', None, request).strip()
# remove spaces and new lines between tags
        comment_text = re.sub(r'>[\n\r\s]*<', '><', comment_text)
comment = Comment(
intention=_('Remark'),
comment=comment_text
)
if isinstance(context, Commentable):
context.addtoproperty('comments', comment)
else:
channel.addtoproperty('comments', comment)
channel.add_comment(comment)
comment.format(request, True)
comment.formatted_comment = '<div class="bot-message">' + \
comment.formatted_comment +\
'</div>'
comment.state = PersistentList(['published'])
comment.reindex()
comment.setproperty('author', nia)
if kwargs.get('related_contents', []):
related_contents = kwargs.get('related_contents')
correlation = connect(
context,
list(related_contents),
{'comment': comment.comment,
'type': comment.intention},
nia,
unique=True)
comment.setproperty('related_correlation', correlation[0])
context.reindex()
def alert_email(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: ['[email protected]']
"""
admin_example_mail = '[email protected]'
sender = senders[0]
subject = kwargs.get('subject', '')
mail = kwargs.get('body', None)
html = kwargs.get('html', None)
attachments = kwargs.get('attachments', [])
if admin_example_mail in recipients:
recipients.remove(admin_example_mail)
if recipients and (mail or html):
mailer_send(
subject=subject, body=mail,
html=html, attachments=attachments,
recipients=recipients, sender=sender)
def alert_sms(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: ['+33....']
"""
message = kwargs.get('message', None)
request = kwargs.get('request', get_current_request())
for recipient in recipients:
to = normalize_us_phone_number(recipient)
send_sms(request, to, message)
def alert_internal(senders=[], recipients=[], exclude=[], **kwargs):
"""
recipients: [user1, user2],
kwargs: {'internal_kind': 'content_alert',...}
"""
kind = kwargs.pop('internal_kind', None)
alert_class = INTERNAL_ALERTS.get(kind, None)
if alert_class and recipients:
subjects = kwargs.pop('subjects', [])
sender = senders[0]
alert = alert_class(**kwargs)
sender.addtoproperty('alerts', alert)
alert.init_alert(recipients, subjects, exclude)
if getattr(sender, 'activate_push_notification', False):
app_id = getattr(sender, 'app_id')
app_key = getattr(sender, 'app_key')
def send_notification(players_ids, excluded_ids=[]):
subject = subjects[0] if subjects else sender
request = get_current_request()
user = get_current(request)
notification_data = alert_class.get_notification_data(
subject, user, request, alert)
header = {
"Content-Type": "application/json",
"authorization": "Basic " + app_key}
payload = {"app_id": app_id,
"headings": {"en": notification_data['title'],
"fr": notification_data['title']},
"contents": {"en": notification_data['message'],
"fr": notification_data['message']},
"url": notification_data['url']
}
if players_ids != 'all':
payload["include_player_ids"] = players_ids
else:
payload["included_segments"] = ['All']
# if excluded_ids:
# payload["excluded_player_ids"] = excluded_ids
try:
requests.post(
"https://onesignal.com/api/v1/notifications",
headers=header, data=json.dumps(payload), timeout=0.1)
except Exception as error:
log.warning(error)
if recipients != 'all':
players_ids = [getattr(user, 'notification_ids', [])
for user in recipients]
players_ids = [item for sublist in players_ids
for item in sublist]
if players_ids:
excluded_ids = [getattr(user, 'notification_ids', [])
for user in exclude]
excluded_ids = [item for sublist in excluded_ids
for item in sublist]
send_notification(players_ids, excluded_ids)
else:
send_notification('all')
def alert(kind="", senders=[], recipients=[], exclude=[], **kwargs):
alert_op = ALERTS.get(kind, None)
if alert_op:
try:
recipients = list(set(recipients)) if isinstance(recipients, (list, set)) else recipients
return alert_op(senders, recipients, exclude, **kwargs)
except Exception as error:
log.warning(error)
return None
log.warning("Alert kind {kind} not implemented".format(kind=kind))
return None
ALERTS = {
'internal': alert_internal,
# 'slack': alert_slack,
# 'arango': alert_arango,
'email': alert_email,
'sms': alert_sms
}
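# Usage sketch (hypothetical addresses, not part of the module): dispatch an
# e-mail alert through the generic ``alert`` entry point defined above.
def _example_alert_email():
    return alert(
        kind='email',
        senders=['[email protected]'],            # hypothetical sender
        recipients=['[email protected]'],   # hypothetical recipient
        subject='Test alert',
        body='Plain-text body of the alert (sketch only).')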
| agpl-3.0 | -1,736,134,962,268,818,200 | 34.310861 | 101 | 0.559504 | false |
zmarvel/playground | project6-hy/tests.py | 1 | 4016 | from hashtable import Hashtable
import time
###########################
########## Tests ##########
###########################
some_words = [u'lewes', # => 5
u'mistranscribe', # => 13
u'outbleed', # => 8
u'abstemiously', # => 12
u'antifeudal', # => 10
u'tableaux', # => 8
u'whine', # => 5
u'ytterbite', # => 9
u'redeemer'] # => 8
filename = "words.txt"
print(u'Reading words from file {}.'.format(filename))
most_words = []
start_time = time.time()
with open(filename) as f:
for line in f.readlines():
most_words.append(line.strip())
print(u'Read in {} words in {}s.'.format(len(most_words), time.time()-start_time))
def do_tests(T):
"""Run the tests for the Hashtable class.
For the example hashtable, we're mapping strings to integers. More
specifically, we're mapping words to the number of characters they have,
just for fun. The test function takes a Hashtable of words mapped to their
length, and at the end it adds a lot more of them to it.
"""
print(u'Starting hashtable tests!')
print(u'#####################')
print(u'')
print(u'Initial word list: {}'.format(some_words))
# test the constructor (which also uses __setitem__ and thereby __getitem__)
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
print(u'Testing __setitem__ and __getitem__')
# test __setitem__ and __getitem__ some more
more_words = [u'nummulitic', u'proconviction', u'inscriber']
print(u'Adding more things to the table: {}'.format(more_words))
for word in more_words:
T[word] = len(word)
# make sure the original words are still there
for word in some_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
# make sure the insertion actually worked
for word in more_words:
print(u'{} should map to {}.'.format(word, len(word)))
assert T[word] == len(word)
print(u'#####################')
print(u'')
# now delete the second list of words
print(u'Testing delete')
for word in more_words:
print(u'Delete key {}'.format(word))
del T[word]
# make sure the words in more_words aren't keys anymore
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in more_words:
assert word not in keys
print(u'#####################')
print(u'')
# let's put them back in
for word in more_words:
print(u'Re-adding {}.'.format(word))
T[word] = len(word)
# make sure the list of keys contains all the words from both lists
keys = T.keys()
print(u'Current list of keys: {}'.format(keys))
for word in some_words:
assert word in keys
for word in more_words:
assert word in keys
print(u'#####################')
print(u'')
print(u'Now, let\'s make the table REALLY big!')
print(u'(In other words, let\'s test double() and quarter().)')
print(u'#####################')
print(u'')
print(u'Putting a bunch of words in the hashtable.')
start_time = time.time()
for word in most_words:
T[word] = len(word)
print(u'{} words inserted successfully in {}s.'.format(\
len(most_words),
time.time()-start_time))
print(u'Checking that the words and their values are actually there.')
for word in most_words:
l = len(word)
print(u'{}: {}'.format(word, l))
assert T[word] == l
print(u'Deleting a lot of items.')
for i, key in enumerate(T.keys()):
if i > 800:
break
else:
del T[key]
print(u'All tests passed!')
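if __name__ == '__main__':
    # Sketch of a direct invocation. The no-argument Hashtable() constructor
    # is an assumption here; the real class may take initial items or a size.
    table = Hashtable()
    for word in some_words:
        table[word] = len(word)
    do_tests(table)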
| mit | 9,035,777,160,325,980,000 | 33.324786 | 82 | 0.534363 | false |
joshua-cogliati-inl/raven | rook/XMLDiff.py | 1 | 19356 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This implements a test to compare two XML files.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import os
import xml.etree.ElementTree as ET
from Tester import Differ
import DiffUtils as DU
#cswf Defined because otherwise lines of code get too long.
cswf = DU.compare_strings_with_floats
numTol = 1e-10 #effectively zero for our purposes
def find_branches(node, path, finished):
"""
  Recursive helper to convert an XML tree into a list of entries
@ In, node, ET.Element, whose children need sorting
@ In, path, list(ET.Element), leading to node
@ In, finished, list(list(ET.Element)), full entries
@ Out, finished, list(list(ET.Element)), of full entries
"""
for child in node:
npath = path[:]+[child]
if len(child) == 0:
finished.append(npath)
else:
finished = find_branches(child, npath, finished)
return finished
def tree_to_list(node):
"""
Converts XML tree to list of entries. Useful to start recursive search.
@ In, node, ET.Element, the xml tree root node to convert
@ Out, tree_to_list, list(list(ET.Element)), of full paths to entries in xml tree
"""
flattened = find_branches(node, [node], [])
return list(tuple(f) for f in flattened)
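# Illustrative sketch (not part of the original module): flattening a small
# tree yields one tuple of ET.Element objects per root-to-leaf path.
def _example_tree_to_list():
  root = ET.fromstring('<a><b/><c><d/></c></a>')
  # For this tree the result is [(a, b), (a, c, d)] in terms of tags.
  return tree_to_list(root)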
def compare_list_entry(a_list, b_list, **kwargs):
"""
  Compare flattened XML entries for equality.
  The returned bool is True if all tags, text, and attributes match, False otherwise;
  the returned qual is the fraction of matching terms.
  @ In, a_list, list(ET.Element), first set
  @ In, b_list, list(ET.Element), second set
  @ Out, compare_list_entry, (bool, float, list), (match, quality, diff) results
"""
num_match = 0 #number of matching points between entries
total_matchable = 0 #total tag, text, and attributes available to match
match = True #True if entries match
diff = [] #tuple of (element, diff code, correct (a) value, test (b) value)
options = kwargs
for i in range(len(a_list)):
if i > len(b_list) - 1:
match = False
diff.append((b_list[-1], XMLDiff.missingChildNode, a_list[i].tag, None))
#could have matched the tag and attributes
total_matchable += 1 + len(a_list[i].attrib.keys())
#if text isn't empty, could have matched text, too
if a_list[i].text is not None and len(a_list[i].text.strip()) > 0:
total_matchable += 1
continue
a_item = a_list[i]
b_item = b_list[i]
#match tag
same, _ = cswf(a_item.tag, b_item.tag,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
total_matchable += 1
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchTag, a_item.tag, b_item.tag))
else:
num_match += 1
#match text
#if (a_item.text is None or len(a_item.text)>0) and (b_item.text is None or len(b_item.text)>0):
same, _ = cswf(a_item.text,
b_item.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchText, str(a_item.text), str(b_item.text)))
total_matchable += 1
else:
if not(a_item.text is None or a_item.text.strip() != ''):
num_match += 1
total_matchable += 1
#match attributes
for attrib in a_item.attrib.keys():
total_matchable += 1
if attrib not in b_item.attrib.keys():
match = False
diff.append((b_item, XMLDiff.missingAttribute, attrib, None))
continue
same, _ = cswf(a_item.attrib[attrib],
b_item.attrib[attrib],
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b_item, XMLDiff.notMatchAttribute, (a_item, attrib), (b_item, attrib)))
else:
num_match += 1
#note attributes in b_item not in a_item
for attrib in b_item.attrib.keys():
if attrib not in a_item.attrib.keys():
match = False
diff.append((b_item, XMLDiff.extraAttribute, attrib, None))
total_matchable += 1
# note elements in b not in a
  if len(b_list) > len(a_list):
    match = False
    for j in range(len(a_list), len(b_list)):
      diff.append((a_list[-1], XMLDiff.extraChildNode, b_list[j].tag, None))
      #count tag and attributes
      total_matchable += 1 + len(b_list[j].attrib.keys())
      #if text isn't empty, count text, too
      if b_list[j].text is not None and len(b_list[j].text.strip()) > 0:
        total_matchable += 1
return (match, float(num_match)/float(total_matchable), diff)
def compare_unordered_element(a_element, b_element, **kwargs):
"""
Compares two element trees and returns (same,message)
where same is true if they are the same,
and message is a list of the differences.
Uses list of tree entries to find best match, instead of climbing the tree
@ In, a_element, ET.Element, the first element
@ In, b_element, ET.Element, the second element
@ Out, compare_unordered_element, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
matchvals = {}
diffs = {}
DU.set_default_options(options)
def fail_message(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
@ Out, fail_message, (bool,string), results
"""
print_args = []
print_args.extend(args)
args_expanded = " ".join([str(x) for x in print_args])
message.append(args_expanded)
if a_element.text != b_element.text:
succeeded, note = cswf(a_element.text,
b_element.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not succeeded:
same = False
fail_message(note)
return (same, message)
a_list = tree_to_list(a_element)
b_list = tree_to_list(b_element)
#search a for matches in b
for a_entry in a_list:
matchvals[a_entry] = {}
diffs[a_entry] = {}
for b_entry in b_list:
same, matchval, diff = compare_list_entry(a_entry, b_entry, **options)
if same:
b_list.remove(b_entry)
del matchvals[a_entry]
del diffs[a_entry]
#since we found the match, remove from other near matches
for close_key in diffs:
if b_entry in diffs[close_key].keys():
del diffs[close_key][b_entry]
del matchvals[close_key][b_entry]
break
matchvals[a_entry][b_entry] = matchval
diffs[a_entry][b_entry] = diff
if len(matchvals) == 0: #all matches found
return (True, '')
note = ''
for unmatched, close in matchvals.items():
#print the path without a match
path = '/'.join(list(m.tag for m in unmatched))
note += 'No match for gold node {}\n'.format(path)
note += ' tag: {}\n'.format(unmatched[-1].tag)
note += ' attr: {}\n'.format(unmatched[-1].attrib)
note += ' text: {}\n'.format(unmatched[-1].text)
#print the tree of the nearest match
note += ' Nearest unused match: '
close = sorted(list(close.items()), key=lambda x: x[1], reverse=True)
if close:
closest = '/'.join(list(c.tag for c in close[0][0]))
else:
closest = '-none found-'
note += ' '+ closest +'\n'
#print what was different between them
if len(close):
diff = diffs[unmatched][close[0][0]]
for b_diff, code, right, miss in diff:
if b_diff is None:
b_diff = str(b_diff)
if code is None:
code = str(code)
if right is None:
right = str(right)
if miss is None:
miss = str(miss)
if code == XMLDiff.missingChildNode:
note += ' <'+b_diff.tag+'> is missing child node: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.missingAttribute:
note += ' <'+b_diff.tag+'> is missing attribute: "'+right+'"\n'
elif code == XMLDiff.extraChildNode:
note += ' <'+b_diff.tag+'> has extra child node: <'+right+'>\n'
elif code == XMLDiff.extraAttribute:
note += ' <'+b_diff.tag+'> has extra attribute: "'+right+\
'" = "'+b_diff.attrib[right]+'"\n'
elif code == XMLDiff.notMatchTag:
note += ' <'+b_diff.tag+'> tag does not match: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.notMatchAttribute:
note += ' <'+b_diff.tag+'> attribute does not match: "'+right[1]+\
'" = "'+right[0].attrib[right[1]]+'" vs "'+miss[0].attrib[miss[1]]+'"\n'
elif code == XMLDiff.notMatchText:
note += ' <'+b_diff.tag+'> text does not match: "'+right+'" vs "'+miss+'"\n'
else:
note += ' UNRECOGNIZED OPTION: "'+b_diff.tag+'" "'+str(code)+\
'": "'+str(right)+'" vs "'+str(miss)+'"\n'
return (False, [note])
def compare_ordered_element(a_element, b_element, *args, **kwargs):
"""
Compares two element trees and returns (same,message) where same is true
if they are the same, and message is a list of the differences
@ In, a_element, ET.Element, the first element tree
@ In, b_element, ET.Element, the second element tree
@ In, args, dict, arguments
@ In, kwargs, dict, keyword arguments
accepted args:
- none -
accepted kwargs:
path: a string to describe where the element trees are located (mainly
used recursively)
@ Out, compare_ordered_element, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
path = kwargs.get('path', '')
counter = kwargs.get('counter', 0)
DU.set_default_options(options)
def fail_message(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
@ Out, fail_message, (bool,string), results
"""
print_args = [path]
print_args.extend(args)
args_expanded = " ".join([str(x) for x in print_args])
message.append(args_expanded)
if a_element.tag != b_element.tag:
same = False
fail_message("mismatch tags ", a_element.tag, b_element.tag)
else:
path += a_element.tag + "/"
if a_element.text != b_element.text:
succeeded, note = cswf(a_element.text,
b_element.text,
rel_err=options["rel_err"],
zero_threshold=options["zero_threshold"],
remove_whitespace=options["remove_whitespace"],
remove_unicode_identifier=options["remove_unicode_identifier"])
if not succeeded:
same = False
fail_message(note)
return (same, message)
different_keys = set(a_element.keys()).symmetric_difference(set(b_element.keys()))
same_keys = set(a_element.keys()).intersection(set(b_element.keys()))
if len(different_keys) != 0:
same = False
fail_message("mismatch attribute keys ", different_keys)
for key in same_keys:
if a_element.attrib[key] != b_element.attrib[key]:
same = False
fail_message("mismatch attribute ", key, a_element.attrib[key], b_element.attrib[key])
if len(a_element) != len(b_element):
same = False
fail_message("mismatch number of children ", len(a_element), len(b_element))
else:
if a_element.tag == b_element.tag:
#find all matching XML paths
#WARNING: this will mangle the XML, so other testing should happen above this!
found = []
for i in range(len(a_element)):
sub_options = dict(options)
sub_options["path"] = path
(same_child, _) = compare_ordered_element(a_element[i], b_element[i], *args, **sub_options)
if same_child:
found.append((a_element[i], b_element[i]))
same = same and same_child
#prune matches from trees
for children in found:
a_element.remove(children[0])
b_element.remove(children[1])
#once all pruning done, error on any remaining structure
if counter == 0: #on head now, recursion is finished
if len(a_element) > 0:
a_string = ET.tostring(a_element)
if len(a_string) > 80:
message.append('Branches in gold not matching test...\n{}'.format(path))
else:
message.append('Branches in gold not matching test...\n{} {}'.format(path, a_string))
if len(b_element) > 0:
b_string = ET.tostring(b_element)
if len(b_string) > 80:
message.append('Branches in test not matching gold...\n{}'.format(path))
else:
message.append('Branches in test not matching gold...\n{} {}'.format(path, b_string))
return (same, message)
class XMLDiff:
"""
XMLDiff is used for comparing xml files.
"""
#static codes for differences
missingChildNode = 0
missingAttribute = 1
extraChildNode = 2
extraAttribute = 3
notMatchTag = 4
notMatchAttribute = 5
notMatchText = 6
def __init__(self, out_files, gold_files, **kwargs):
"""
Create an XMLDiff class
      @ In, out_files, List(string), the files to be compared.
      @ In, gold_files, List(string), the gold files to be compared.
@ In, kwargs, dict, other arguments that may be included:
- 'unordered': indicates unordered sorting
@ Out, None
"""
assert len(out_files) == len(gold_files)
self.__out_files = out_files
self.__gold_files = gold_files
self.__messages = ""
self.__same = True
self.__options = kwargs
def diff(self):
"""
Run the comparison.
@ In, None
@ Out, diff, (bool,string), (same,messages) where same is true if all
the xml files are the same, and messages is a string with all the
differences.
"""
# read in files
for test_filename, gold_filename in zip(self.__out_files, self.__gold_files):
if not os.path.exists(test_filename):
self.__same = False
self.__messages += 'Test file does not exist: '+test_filename
elif not os.path.exists(gold_filename):
self.__same = False
self.__messages += 'Gold file does not exist: '+gold_filename
else:
files_read = True
try:
test_root = ET.parse(test_filename).getroot()
except Exception as exp:
files_read = False
self.__messages += 'Exception reading file '+test_filename+': '+str(exp.args)
try:
gold_root = ET.parse(gold_filename).getroot()
except Exception as exp:
files_read = False
self.__messages += 'Exception reading file '+gold_filename+': '+str(exp.args)
if files_read:
if 'unordered' in self.__options.keys() and self.__options['unordered']:
same, messages = compare_unordered_element(gold_root, test_root, **self.__options)
else:
same, messages = compare_ordered_element(test_root, gold_root, **self.__options)
if not same:
self.__same = False
separator = "\n"+" "*4
self.__messages += "Mismatch between "+test_filename+" and "+gold_filename+separator
self.__messages += separator.join(messages) + "\n"
else:
self.__same = False
if '[' in self.__messages or ']' in self.__messages:
self.__messages = self.__messages.replace('[', '(')
self.__messages = self.__messages.replace(']', ')')
return (self.__same, self.__messages)
class XML(Differ):
"""
This is the class to use for handling the XML block.
"""
@staticmethod
def get_valid_params():
"""
Return the valid parameters for this class.
@ In, None
@ Out, params, _ValidParameters, return the parameters.
"""
params = Differ.get_valid_params()
params.add_param('unordered', False, 'if true allow the tags in any order')
params.add_param('zero_threshold', sys.float_info.min*4.0, 'it represents '
+'the value below which a float is considered zero (XML comparison only)')
params.add_param('remove_whitespace', False,
'Removes whitespace before comparing xml node text if True')
params.add_param('remove_unicode_identifier', False,
'if true, then remove u infront of a single quote')
params.add_param('xmlopts', '', "Options for xml checking")
params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
return params
def __init__(self, name, params, test_dir):
"""
Initializer for the class. Takes a String name and a dictionary params
@ In, name, string, name of the test.
@ In, params, dictionary, parameters for the class
@ In, test_dir, string, path to the test.
@ Out, None.
"""
Differ.__init__(self, name, params, test_dir)
self.__xmlopts = {}
if len(self.specs["rel_err"]) > 0:
self.__xmlopts['rel_err'] = float(self.specs["rel_err"])
self.__xmlopts['zero_threshold'] = float(self.specs["zero_threshold"])
self.__xmlopts['unordered'] = bool(self.specs["unordered"])
self.__xmlopts['remove_whitespace'] = bool(self.specs['remove_whitespace'])
self.__xmlopts['remove_unicode_identifier'] = self.specs['remove_unicode_identifier']
if len(self.specs['xmlopts']) > 0:
self.__xmlopts['xmlopts'] = self.specs['xmlopts'].split(' ')
def check_output(self):
"""
Checks that the output matches the gold.
      returns (same, message) where same is true if the
      test passes, or false if the test fails. message should
      give a human readable explanation of the differences.
@ In, None
@ Out, (same, message), same is true if the tests passes.
"""
xml_files = self._get_test_files()
gold_files = self._get_gold_files()
xml_diff = XMLDiff(xml_files, gold_files, **self.__xmlopts)
return xml_diff.diff()
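# Usage sketch (hypothetical file names, not part of the original module):
# compare one test output against its gold file with a relative tolerance.
def _example_xml_diff(test_file='out.xml', gold_file='gold/out.xml'):
  differ = XMLDiff([test_file], [gold_file], rel_err=1e-6, unordered=True)
  same, messages = differ.diff()
  return same, messages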
| apache-2.0 | 3,820,883,646,590,924,000 | 39.493724 | 100 | 0.606685 | false |
openatv/enigma2 | lib/python/Plugins/Extensions/FileCommander/addons/dmnapi.py | 3 | 1533 | #!/usr/bin/python -u
# -*- coding: UTF-8 -*-
# napiprojekt.pl API is used with napiproject administration consent
import re
import os
import os.path
import sys
import dmnapim
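# Usage sketch (arguments inferred from the option parsing below; the paths
# are hypothetical):
#   dmnapi.py get 23976 /path/to/movie.avi     fetch subtitles at 23.976 fps
#   dmnapi.py all 0 /path/to/movie.avi         fetch for every matching file in the directory
#   dmnapi.py convert 0 in.txt out.srt         convert a subtitle file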
def get_all(file, supplement):
rex = re.compile('.*\\.%s$' % file[-3:], re.I)
(dir, fname) = os.path.split(file)
for f in os.listdir(dir):
if os.path.exists(os.path.join(dir, f[:-4] + '.srt')) and supplement:
pass
else:
if rex.match(f):
try:
dmnapim.get_sub_from_napi(os.path.join(dir, f))
except:
print " Error: %s" % (sys.exc_info()[1])
try:
# opt fps videofile [subtitlefile]
opt = sys.argv[1]
try:
fps = float(sys.argv[2]) / 1000
except:
fps = 0
if opt == "get":
file = os.path.abspath(sys.argv[3])
dmnapim.get_sub_from_napi(file, fps=fps)
elif opt == "all" or opt == 'allnew':
file = os.path.abspath(sys.argv[3])
get_all(file, opt == "allnew")
elif opt == "convert":
file = os.path.abspath(sys.argv[3])
dmnapim.convert(file, sys.argv[4], fps=fps)
elif opt == "upgrade":
file = sys.argv[2]
x, ipk = os.path.split(file)
if os.path.exists("/usr/bin/opkg"):
do = "opkg install " + ipk
else:
do = "ipkg install " + ipk
print "Upgrade to:\n", file, "\n"
os.system("cd /tmp ; rm -f enigma2-plugin-extensions-dmnapi*.ipk ; opkg update && wget -c %s && ls -al enigma2-plugin-extensions-dmnapi*.ipk && %s" % (file, do))
elif opt == "n24":
file = os.path.abspath(sys.argv[3])
dmnapim.get_sub_from_n24(file, sys.argv[4], fps=fps)
except:
print " Error: %s" % (sys.exc_info()[1])
| gpl-2.0 | -2,545,383,978,789,383,700 | 26.872727 | 163 | 0.621657 | false |
itsvetkov/pyqtgraph | pyqtgraph/debug.py | 1 | 36061 | # -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, pstats, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
def warnOnException(func):
"""Decorator which catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| '):
tb = traceback.format_exc()
lines = []
for l in tb.split('\n'):
lines.append(" "*indent + prefix + l)
return '\n'.join(lines)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
    ignoreTypes = [types.MethodType, types.BuiltinMethodType, types.FunctionType, types.BuiltinFunctionType]
    if hasattr(types, 'UnboundMethodType'):  # unbound methods exist on Python 2 only
        ignoreTypes.append(types.UnboundMethodType)
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
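# Usage sketch (not part of the original module): estimate the footprint of
# a nested structure, following container and attribute references.
def _example_object_size():
    data = {'xs': list(range(1000)), 'tag': 'demo'}
    return objectSize(data, recursive=True)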
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
Mainly used to check whether the objects have been collect yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled=='env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled=='env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
#else:
#def __new__(cls, delayed=True):
#return lambda msg=None: None
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
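# Usage sketch (not part of the original module): profile a throwaway
# statement; note this writes a stats file named 'demo_profile' in the
# current directory before printing the most expensive calls.
def _example_profile():
    return profile("sum(x * x for x in range(100000))", name='demo_profile')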
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
        for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
        for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
        typs.sort(key=lambda t: c1[t])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
                ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
        typs.sort(key=lambda t: count[t][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
    if typ == getattr(types, 'InstanceType', None):  # old-style instances (Python 2 only)
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
    global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
class PeriodicTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of the main thread periodically.
"""
class ReportThread(QtCore.QThread):
def __init__(self):
self.frame = None
self.ind = 0
self.lastInd = None
self.lock = Mutex()
QtCore.QThread.__init__(self)
def notify(self, frame):
with self.lock:
self.frame = frame
self.ind += 1
def run(self):
while True:
time.sleep(1)
with self.lock:
if self.lastInd != self.ind:
print("== Trace %d: ==" % self.ind)
traceback.print_stack(self.frame)
self.lastInd = self.ind
def __init__(self):
self.mainThread = threading.current_thread()
self.thread = PeriodicTrace.ReportThread()
self.thread.start()
sys.settrace(self.trace)
def trace(self, frame, event, arg):
if threading.current_thread() is self.mainThread: # and 'threading' not in frame.f_code.co_filename:
self.thread.notify(frame)
# print("== Trace ==", event, arg)
# traceback.print_stack(frame)
return self.trace
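# Sketch of intended PeriodicTrace use: instantiate once from the main thread;
# while the main thread keeps executing, its current stack is printed roughly
# once per second:
#   tracer = PeriodicTrace()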
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
| mit | 1,375,255,485,171,331,300 | 32.670401 | 161 | 0.522254 | false |
cloudbase/maas | src/maasserver/migrations/0010_add_node_netboot.py | 1 | 11133 | # flake8: noqa
# SKIP this file when reformatting.
# The rest of this file was generated by South.
# encoding: utf-8
import datetime
from django.db import models
from maasserver.enum import NODE_STATUS
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Node.netboot'
db.add_column(u'maasserver_node', 'netboot', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
# Find all the allocated nodes with netboot=True.
allocated_nodes = orm['maasserver.node'].objects.filter(
status=NODE_STATUS.ALLOCATED, netboot=True)
# Set netboot=False on these nodes.
allocated_nodes.update(netboot=False)
def backwards(self, orm):
# Deleting field 'Node.netboot'
db.delete_column(u'maasserver_node', 'netboot')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], {'null': 'True'})
},
u'maasserver.filestorage': {
'Meta': {'object_name': 'FileStorage'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}),
'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}),
'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-20250ca0-b8f4-11e1-afce-002215205ce8'", 'unique': 'True', 'max_length': '41'}),
'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}),
'broadcast_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_range_high': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'ip_range_low': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '80'}),
'router_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'subnet_mask': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'worker_ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'})
},
u'maasserver.sshkey': {
'Meta': {'unique_together': "((u'user', u'key'),)", 'object_name': 'SSHKey'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'maasserver.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'piston.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"})
},
'piston.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1339989444L'}),
'token_type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['maasserver']
| agpl-3.0 | -3,308,740,581,718,270,500 | 68.149068 | 182 | 0.548729 | false |
gusDuarte/software-center-5.2 | softwarecenter/config.py | 1 | 2669 | # Copyright (C) 2011 Canonical
#
# Authors:
# Andrew Higginson
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# py3 compat
try:
from configparser import SafeConfigParser
SafeConfigParser # pyflakes
except ImportError:
from ConfigParser import SafeConfigParser
import os
import logging
from paths import SOFTWARE_CENTER_CONFIG_FILE
LOG = logging.getLogger(__name__)
class SoftwareCenterConfig(SafeConfigParser):
def __init__(self, config):
SafeConfigParser.__init__(self)
from utils import safe_makedirs
safe_makedirs(os.path.dirname(config))
# we always want this section, even on fresh installs
self.add_section("general")
# read the config
self.configfile = config
try:
self.read(self.configfile)
except Exception as e:
# don't crash on a corrupted config file
LOG.warn("Could not read the config file '%s': %s",
self.configfile, e)
pass
def write(self):
tmpname = self.configfile + ".new"
# see LP: #996333, its ok to remove the old configfile as
# its rewritten anyway
from utils import ensure_file_writable_and_delete_if_not
ensure_file_writable_and_delete_if_not(tmpname)
ensure_file_writable_and_delete_if_not(self.configfile)
try:
f = open(tmpname, "w")
SafeConfigParser.write(self, f)
f.close()
os.rename(tmpname, self.configfile)
except Exception as e:
# don't crash if there's an error when writing to the config file
# (LP: #996333)
LOG.warn("Could not write the config file '%s': %s",
self.configfile, e)
pass
_software_center_config = None
def get_config(filename=SOFTWARE_CENTER_CONFIG_FILE):
""" get the global config class """
global _software_center_config
if not _software_center_config:
_software_center_config = SoftwareCenterConfig(filename)
return _software_center_config
| lgpl-3.0 | -5,625,440,615,949,773,000 | 33.217949 | 79 | 0.66242 | false |
veteman/thepython2blob | logicanalyser2fss.py | 1 | 5278 | #Copyright 2015 B. Johan G. Svensson
#Licensed under the terms of the MIT license (see LICENSE).
from __future__ import division
import struct, time, csv
import fsslib
class CSVWriter():
    """Placeholder for CSV output; not implemented."""
    def __init__(self, fid, samplerate):
        pass
class CSVReader():
def __init__(self, fid, samplerate):
self.samplerate = samplerate
self.csvfile = csv.reader(fid, skipinitialspace = True,strict=True)
self.csvfile.next() # Waste header
def getnext(self):
try:
linelst = self.csvfile.next()
except:
return False, False, False, False, False, False
dtime = int(round(float(linelst[0]) * self.samplerate))
data = linelst[1] == '1' # int() is too slow
indx = linelst[2] == '1' # int() is too slow
step = linelst[3] == '1' # int() is too slow
side = linelst[4] == '1' # int() is too slow
oper = linelst[5] == '1' # int() is too slow
return dtime, data, indx, step, side, oper
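# Assumed CSV layout (the header row is discarded above; the column names are
# guesses, only the order matters): time, data, index, step, side, operation
#   e.g. "0.00000004, 1, 0, 1, 0, 1"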
class BinDeltaWrite():
    """Placeholder (unused); BinDeltaWriter below does the actual writing."""
    def __init__(self, fid, samplerate):
        pass
class BinDeltaWriter():
def __init__(self, fid):
self.fid = fid
def write(self, dtime, data, indx, step, side, oper):
bitfield = data | (indx << 1) | (step << 2) | (side << 3) | (oper << 4)
data = struct.pack('<QB', dtime, bitfield)
self.fid.write(data)
class BinDeltaReader():
def __init__(self, fid):
self.fid = fid
def getnext(self):
data = self.fid.read(9)
if len(data) < 9:
return False, False, False, False, False, False
        dtime, bitfield = struct.unpack('<QB', data)
data = bitfield & 1
indx = (bitfield >> 1) & 1
step = (bitfield >> 2) & 1
side = (bitfield >> 3) & 1
oper = (bitfield >> 4) & 1
return dtime, data, indx, step, side, oper
class BinDeltaReader2():
def __init__(self, fid, blocksize):
self.fid = fid
self.indx = 0
self.data = ''
self.datatuple = tuple()
self.blocksize = blocksize
def fillbuffer(self):
self.data = self.fid.read(9*self.blocksize)
length = len(self.data)//9
        self.datatuple = struct.unpack('<' + length * 'QB', self.data)
self.indx = 0
def getnext(self):
if self.indx + 1 >= len(self.datatuple):
self.fillbuffer()
if len(self.data) < 9:
return False, False, False, False, False, False
dtime = self.datatuple[self.indx]
bitfield = self.datatuple[self.indx + 1]
data = bitfield & 1
indx = (bitfield >> 1) & 1
step = (bitfield >> 2) & 1
side = (bitfield >> 3) & 1
oper = (bitfield >> 4) & 1
self.indx += 2
return dtime, data, indx, step, side, oper
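# BinDeltaReader and BinDeltaReader2 expose the same getnext() interface;
# BinDeltaReader2 unpacks `blocksize` 9-byte records per read() call as a
# speed optimisation (converttofss below uses a blocksize of 10000).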
def converttofss(fnamein,fnameout):
samplerate = 25000000
fin = open(fnamein,'rb')
fin.seek(0,2)
size = fin.tell()
fin.seek(0)
print 'Init readers'
#reader = CSVReader(fin, samplerate)
#reader = BinDeltaReader(fin)
reader = BinDeltaReader2(fin, 10000)
print 'Init buffers'
packer = fsslib.Pacman(samplerate)
print 'Init done'
howfar = 1
tim1sum = 0
tim2sum = 0
tim3sum = 0
tim4sum = 0
timtot = 0
howfar2 = 1
tic3 = time.clock()
while True:
tic0 = time.clock()
dtime, data, indx, step, side, oper = reader.getnext()
        if dtime is False:  # getnext() returns False at end of stream; dtime == 0 is valid data
break
tic1 = time.clock()
packer.store(dtime, data, indx, step, side, oper)
tic2 = time.clock()
howfar += 1
if howfar > 1000000:
print str(100*fin.tell()/size) + '%'
howfar = 1
print 'Time phase 1', tim1sum
print 'Time phase 2', tim2sum
print 'Time phase 3', tim3sum
print 'Time phase 4', tim4sum
print 'Time total', timtot
howfar2 += 1
tic3last = tic3
tic3 = time.clock()
tim1sum += tic1 - tic0
tim2sum += tic2 - tic1
tim3sum += tic3 - tic2
tim4sum += tic0 - tic3last
timtot += tic3 - tic3last
fin.close()
print 'Pack: Start'
outstr = packer.commit()
print 'Pack: Saving to file'
fout = open(fnameout,'wb')
fout.write(outstr)
fout.close()
print 'All done'
def convertfrfss(fnamein, fnameout):
fin = open(fnamein, 'rb')
print 'Reading file'
unpacker = fsslib.Streamer(fin)
samplerate = unpacker.samplerate
fin.close()
fout = open(fnameout, 'wb')
writer = BinDeltaWriter(fout)
print 'Decoding (will take LONG time!)'
while True:
stim, data, indx, step, side, oper = unpacker.getnext()
if stim is False:
break
writer.write(stim, data, indx, step, side, oper)
def test01():
converttofss('j:\\Transfer\\Midwinter-64bitDELTA8bit25MHz.bin', 'j:\\Transfer\\Midwinter.fss')
def test02():
convertfrfss('j:\\Transfer\\Midwinter.fss', 'j:\\Transfer\\Midwinter-DELTATEST.bin')
def main():
#test01()
test02()
if __name__ == '__main__':
main()
| mit | 2,724,560,461,482,590,700 | 28.333333 | 98 | 0.541872 | false |
laurentb/weboob | modules/bouygues/module.py | 1 | 2730 | # -*- coding: utf-8 -*-
# Copyright(C) 2019 Budget Insight
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.base import find_object
from weboob.capabilities.bill import CapDocument, Document, SubscriptionNotFound, Subscription, DocumentNotFound
from weboob.tools.value import Value, ValueBackendPassword
from .browser import BouyguesBrowser
__all__ = ['BouyguesModule']
class BouyguesModule(Module, CapDocument):
NAME = 'bouygues'
DESCRIPTION = 'Bouygues Télécom'
MAINTAINER = 'Florian Duguet'
EMAIL = '[email protected]'
LICENSE = 'LGPLv3+'
VERSION = '2.1'
CONFIG = BackendConfig(Value('login', label='Numéro de mobile, de clé/tablette ou e-mail en @bbox.fr'),
ValueBackendPassword('password', label='Mot de passe'),
ValueBackendPassword('lastname', label='Nom de famille', default=''))
BROWSER = BouyguesBrowser
def create_default_browser(self):
return self.create_browser(self.config['login'].get(), self.config['password'].get(), self.config['lastname'].get())
def iter_subscription(self):
return self.browser.iter_subscriptions()
def get_subscription(self, _id):
return find_object(self.iter_subscription(), id=_id, error=SubscriptionNotFound)
def iter_documents(self, subscription):
if not isinstance(subscription, Subscription):
subscription = self.get_subscription(subscription)
return self.browser.iter_documents(subscription)
def get_document(self, _id):
subid = _id.rsplit('_', 1)[0]
subscription = self.get_subscription(subid)
return find_object(self.iter_documents(subscription), id=_id, error=DocumentNotFound)
def download_document(self, document):
if not isinstance(document, Document):
document = self.get_document(document)
return self.browser.download_document(document)
| lgpl-3.0 | 8,566,490,347,002,986,000 | 39.088235 | 124 | 0.710565 | false |
sempliva/Emotly | emotly/tests/test_model_user.py | 1 | 6285 | """
MIT License
Copyright (c) 2016 Emotly Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# User Model Test Case
import unittest
import datetime
import time
from mongoengine import ValidationError, NotUniqueError
from emotly import app
from emotly import constants as CONSTANTS
from emotly.models import User
# Model: User.
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def tearDown(self):
User.objects.delete()
def test_create_user(self):
u = User(nickname='testcreateuser',
email='[email protected]',
password="FakeUserPassword123", salt="salt")
self.assertTrue(u.save())
def test_create_3_user(self):
u = User(nickname='testemotly',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u.save()
time.sleep(1) # sleep time in seconds
u1 = User(nickname='testemotly1',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u1.save()
time.sleep(1) # sleep time in seconds
u2 = User(nickname='testemotly2',
email='[email protected]',
password="FakeUserPassword123",
confirmed_email=True,
last_login=datetime.datetime.now(),
salt="salt")
u2.save()
self.assertNotEqual(u.created_at, u1.created_at)
self.assertNotEqual(u1.created_at, u2.created_at)
def test_cannot_create_user_nickname_too_long(self):
u = User(nickname='VeryLongNicknameThatIsTooLong',
email='[email protected]',
password="FakeUserPassword123", salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_no_nickname(self):
u = User(email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_too_short(self):
u = User(nickname='test',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_not_match_validation_regex(self):
u = User(nickname='test&@1235',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_nickname_not_match_validation_regex2(self):
u = User(nickname='^^^$$$$$!!',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_password(self):
u = User(nickname='testnopsw',
email='[email protected]',
salt="salt")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_salt(self):
u = User(nickname='testnosalt',
email='[email protected]',
password="FakeUserPassword123")
with self.assertRaises(ValidationError):
u.save()
    def test_cannot_create_user_without_email(self):
u = User(nickname='testnomail',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_email_not_valid(self):
u = User(nickname='testmailnovalid',
email='test_duplicateexample.com',
password="FakeUserPassword123",
salt="salt")
with self.assertRaises(ValidationError):
u.save()
def test_cannot_create_user_email_duplicate_key(self):
u = User(nickname='testuser',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u2 = User(nickname='testuser2',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u.save()
self.assertRaises(NotUniqueError, u2.save)
def test_cannot_create_user_nickname_duplicate_key(self):
u = User(nickname='testnickname',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u2 = User(nickname='testnickname',
email='[email protected]',
password="FakeUserPassword123",
salt="salt")
u.save()
self.assertRaises(NotUniqueError, u2.save)
| mit | -5,786,649,106,648,142,000 | 36.861446 | 78 | 0.608115 | false |
Rayal/ROS_proov | ros_robotics_projects/chapter_7_codes/rostensorflow/image_recognition.py | 1 | 1917 | import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.models.image.imagenet import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        # Look up the softmax output tensor in the graph built during __init__.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
| mit | -6,235,122,650,728,627,000 | 38.122449 | 113 | 0.633281 | false |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/test/test_connect_var.py | 1 | 9889 | # test_connect_var.py
"""
Testing connecting different variable types to each other.
"""
import unittest
from openmdao.main.api import Assembly, Component, set_as_top
from openmdao.main.datatypes.api import Array, Float, Int, Str, Bool, Enum
class Oneout(Component):
""" A simple output component """
ratio1 = Float(3.54, iotype='out',
desc='Float Variable')
ratio2 = Int(9, iotype='out',
desc='Integer variable')
ratio3 = Bool(True, iotype='out',
desc='Boolean Variable')
ratio4 = Float(1.03, iotype='out',
desc='Float variable ', units='cm')
ratio5 = Str('05678', iotype='out',
desc='string variable')
ratio6 = Enum(27, (0, 3, 9, 27), iotype='out', desc='some enum')
unit = Float(12.0, units='inch', iotype='out')
no_unit = Float(12.0, iotype='out')
arrout = Array(dtype=float, default_value=[1, 2, 3], iotype='out')
def execute(self):
"""
execute
"""
# print '%s.execute()' % self.get_pathname()
class Oneinp(Component):
""" A simple input component """
ratio1 = Float(2., iotype='in',
desc='Float Variable')
ratio2 = Int(2, iotype='in',
desc='Int Variable')
ratio3 = Bool(False, iotype='in',
desc='Float Variable')
ratio4 = Float(1.03, iotype='in',
desc='Float variable ', units='ft')
ratio5 = Str('01234', iotype='in',
desc='string variable')
ratio6 = Enum(0, (0, 3, 11, 27), iotype='in', desc='some enum')
unit = Float(0.0, units='ft', iotype='in')
no_unit = Float(0.0, iotype='in')
arrinp = Array(dtype=float, default_value=[42, 13, 0], iotype='in')
def execute(self):
"""
execute
"""
# print '%s.execute()' % self.get_pathname()
class VariableTestCase(unittest.TestCase):
def setUp(self):
""" this function is used to test each type..."""
self.top = set_as_top(Assembly())
self.top.add('oneinp', Oneinp())
self.top.add('oneout', Oneout())
self.top.driver.workflow.add(['oneinp', 'oneout'])
def tearDown(self):
self.top = None
def test_var1(self):
# connect to same type variables....
self.top.connect('oneout.ratio1', 'oneinp.ratio1') # float to float
self.top.connect('oneout.ratio2', 'oneinp.ratio2') # int to int
self.top.connect('oneout.ratio3', 'oneinp.ratio3') # Bool to Bool
self.top.connect('oneout.ratio4', 'oneinp.ratio4') # float with units to float with unit
self.top.connect('oneout.ratio5', 'oneinp.ratio5') # Str to Str
self.top.connect('oneout.ratio6', 'oneinp.ratio6') # Enum to Enum (int valued)
self.top.run()
self.assertEqual(3.54, self.top.oneinp.ratio1)
self.assertEqual(9, self.top.oneinp.ratio2)
self.assertEqual(True, self.top.oneinp.ratio3)
self.assertAlmostEqual(0.033792, self.top.oneinp.ratio4, 5)
self.assertEqual('05678', self.top.oneinp.ratio5)
self.assertEqual(27, self.top.oneinp.ratio6)
def test_var2(self):
self.top.oneout.ratio2 = 11
self.top.connect('oneout.ratio2', 'oneinp.ratio1') # int to Float
self.top.oneout.ratio3 = True
self.top.connect('oneout.ratio3', 'oneinp.ratio2') # Bool to int
self.top.connect('oneout.ratio2', 'oneinp.ratio6') # Int to Enum (int valued)
self.top.run()
self.assertEqual(11.0, self.top.oneinp.ratio1)
self.assertEqual(True, self.top.oneinp.ratio2)
self.assertEqual(11, self.top.oneinp.ratio6)
def test_var3(self):
self.top.oneout.ratio3 = False
self.top.connect('oneout.ratio3', 'oneinp.ratio1') # Bool to Float
self.top.connect('oneout.ratio3', 'oneinp.ratio2') # Bool to int
self.top.run()
self.assertEqual(0.0, self.top.oneinp.ratio1)
self.assertEqual(False, self.top.oneinp.ratio2)
def test_var3a(self):
self.top.connect('oneout.ratio6', 'oneinp.ratio2') # Enum to Int
self.top.run()
self.assertEqual(27, self.top.oneinp.ratio2)
def test_var4(self):
self.top.oneout.ratio1 = 12.0
try:
self.top.connect('oneout.ratio1', 'oneinp.ratio2') # float to int
except Exception, err:
msg = "but a value of 12.0 <type 'float'> was specified"
self.assertTrue(msg in str(err))
else:
self.fail('Exception Expected')
def test_var5(self):
self.top.oneout.ratio1 = 12.0
try:
self.top.connect('oneout.ratio1', 'oneinp.ratio3') # float to Bool
except Exception, err:
msg = ": Can't connect 'oneout.ratio1' to 'oneinp.ratio3': " + \
"The 'ratio3' trait of an Oneinp instance must be a" \
" boolean, but a value of 12.0 <type 'float'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var6(self):
self.top.oneout.ratio1 = 12.0
try:
self.top.connect('oneout.ratio1', 'oneinp.ratio5') # float to Str
except Exception, err:
msg = ": Can't connect 'oneout.ratio1' to 'oneinp.ratio5': " + \
"The 'ratio5' trait of an Oneinp instance must be a" \
" string, but a value of 12.0 <type 'float'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var7(self):
self.top.oneout.ratio2 = 20
try:
self.top.connect('oneout.ratio2', 'oneinp.ratio3') # int to Bool
except Exception, err:
msg = ": Can't connect 'oneout.ratio2' to 'oneinp.ratio3': " + \
"The 'ratio3' trait of an Oneinp instance must be a" \
" boolean, but a value of 20 <type 'int'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var8(self):
self.top.oneout.ratio2 = 20
try:
self.top.connect('oneout.ratio2', 'oneinp.ratio5') # int to Str
except Exception, err:
msg = ": Can't connect 'oneout.ratio2' to 'oneinp.ratio5': " + \
"The 'ratio5' trait of an Oneinp instance must be a" \
" string, but a value of 20 <type 'int'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var9(self):
self.top.oneout.ratio3 = True
try:
self.top.connect('oneout.ratio3', 'oneinp.ratio5') # Bool to Str
except Exception, err:
msg = ": Can't connect 'oneout.ratio3' to 'oneinp.ratio5': " + \
"The 'ratio5' trait of an Oneinp instance must be a" \
" string, but a value of True <type 'bool'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var10(self):
self.top.oneout.ratio5 = '55555'
try:
self.top.connect('oneout.ratio5', 'oneinp.ratio2') # Str to int
except Exception, err:
msg = "a value of 55555 <type 'str'> was specified"
self.assertTrue(msg in str(err))
else:
self.fail('Exception Expected')
def test_var11(self):
self.top.oneout.ratio5 = '55555'
try:
self.top.connect('oneout.ratio5', 'oneinp.ratio1') # Str to Float
except Exception, err:
msg = ": Can't connect 'oneout.ratio5' to 'oneinp.ratio1': oneinp: " + \
"Variable 'ratio1' must be a float, but a value of 55555" + \
" <type 'str'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var12(self):
self.top.oneout.ratio5 = '55555'
try:
self.top.connect('oneout.ratio5', 'oneinp.ratio3') # Str to Bool
except Exception, err:
msg = ": Can't connect 'oneout.ratio5' to 'oneinp.ratio3': " + \
"The 'ratio3' trait of an Oneinp instance must be a boolean, but a" \
" value of '55555' <type 'str'> was specified."
self.assertEqual(str(err), msg)
else:
self.fail('Exception Expected')
def test_var13_units(self):
        self.top.connect('oneout.unit', 'oneinp.no_unit')  # Float with units to Float without units
        self.top.connect('oneout.no_unit', 'oneinp.unit')  # Float without units to Float with units
self.top.run()
self.assertEqual(12.0, self.top.oneinp.no_unit)
self.assertEqual(12.0, self.top.oneinp.unit)
def _parse_list(self, liststr):
liststr = liststr[1:len(liststr) - 2]
return set([s.strip("'") for s in liststr.split(', ') if s.strip()])
def test_attributes(self):
# Check for correct 'connected' information.
self.top.connect('oneout.arrout[0]', 'oneinp.arrinp[0]')
self.top.connect('oneout.arrout[1]', 'oneinp.arrinp[1]')
inputs = self.top.oneinp.get_attributes()['Inputs']
for item in inputs:
if item['name'] == 'arrinp':
self.assertEqual(self._parse_list(item['connected']),
self._parse_list("['oneout.arrout[1]', 'oneout.arrout[0]']"))
break
else:
self.fail('No arrinp item!')
if __name__ == '__main__':
import nose
import sys
sys.argv.append('--cover-package=openmdao.main')
nose.runmodule()
| mit | 6,529,345,715,683,629,000 | 38.714859 | 97 | 0.557893 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/models/match_spec.py | 1 | 31362 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
from functools import reduce
from os.path import basename
import re
from .channel import Channel
from .dist import Dist
from .records import PackageRecord, PackageRef
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import (isiterable, iteritems, itervalues, string_types, text_type,
with_metaclass)
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat, concatv, groupby
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv, groupby # NOQA
class MatchSpecType(type):
def __call__(cls, spec_arg=None, **kwargs):
if spec_arg:
if isinstance(spec_arg, MatchSpec) and not kwargs:
return spec_arg
elif isinstance(spec_arg, MatchSpec):
new_kwargs = dict(spec_arg._match_components)
new_kwargs.setdefault('optional', spec_arg.optional)
new_kwargs.setdefault('target', spec_arg.target)
new_kwargs.update(**kwargs)
return super(MatchSpecType, cls).__call__(**new_kwargs)
elif isinstance(spec_arg, string_types):
parsed = _parse_spec_str(spec_arg)
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Mapping):
parsed = dict(spec_arg, **kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, PackageRef):
parsed = {
'channel': spec_arg.channel,
'subdir': spec_arg.subdir,
'name': spec_arg.name,
'version': spec_arg.version,
'build': spec_arg.build,
}
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif isinstance(spec_arg, Dist):
# TODO: remove this branch when we get rid of Dist
parsed = {
'name': spec_arg.name,
'version': spec_arg.version,
'build': spec_arg.build,
}
if spec_arg.channel:
parsed['channel'] = spec_arg.channel
if spec_arg.subdir:
parsed['subdir'] = spec_arg.subdir
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
elif hasattr(spec_arg, 'dump'):
parsed = spec_arg.dump()
parsed.update(kwargs)
return super(MatchSpecType, cls).__call__(**parsed)
else:
raise CondaValueError("Invalid MatchSpec:\n spec_arg=%s\n kwargs=%s"
% (spec_arg, kwargs))
else:
return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
"""
:class:`MatchSpec` is, fundamentally, a query language for conda packages. Any of the fields
that comprise a :class:`PackageRecord` can be used to compose a :class:`MatchSpec`.
:class:`MatchSpec` can be composed with keyword arguments, where keys are any of the
attributes of :class:`PackageRecord`. Values for keyword arguments are the exact values the
attribute should match against. Many fields can also be matched against non-exact values--by
including wildcard `*` and `>`/`<` ranges--where supported. Any non-specified field is
the equivalent of a full wildcard match.
:class:`MatchSpec` can also be composed using a single positional argument, with optional
keyword arguments. Keyword arguments also override any conflicting information provided in
the positional argument. The positional argument can be either an existing :class:`MatchSpec`
instance or a string. Conda has historically had several string representations for equivalent
:class:`MatchSpec`s. This :class:`MatchSpec` should accept any existing valid spec string, and
correctly compose a :class:`MatchSpec` instance.
A series of rules are now followed for creating the canonical string representation of a
:class:`MatchSpec` instance. The canonical string representation can generically be
represented by
(channel(/subdir):(namespace):)name(version(build))[key1=value1,key2=value2]
where `()` indicate optional fields. The rules for constructing a canonical string
representation are:
1. `name` (i.e. "package name") is required, but its value can be '*'. Its position is always
outside the key-value brackets.
2. If `version` is an exact version, it goes outside the key-value brackets and is prepended
by `==`. If `version` is a "fuzzy" value (e.g. `1.11.*`), it goes outside the key-value
brackets with the `.*` left off and is prepended by `=`. Otherwise `version` is included
inside key-value brackets.
3. If `version` is an exact version, and `build` is an exact value, `build` goes outside
key-value brackets prepended by a `=`. Otherwise, `build` goes inside key-value brackets.
`build_string` is an alias for `build`.
4. The `namespace` position is being held for a future conda feature.
    5. If `channel` is included and is an exact value, a `::` separator is used between `channel`
and `name`. `channel` can either be a canonical channel name or a channel url. In the
canonical string representation, the canonical channel name will always be used.
6. If `channel` is an exact value and `subdir` is an exact value, `subdir` is appended to
`channel` with a `/` separator. Otherwise, `subdir` is included in the key-value brackets.
7. Key-value brackets can be delimited by comma, space, or comma+space. Value can optionally
be wrapped in single or double quotes, but must be wrapped if `value` contains a comma,
space, or equal sign. The canonical format uses comma delimiters and single quotes.
8. When constructing a :class:`MatchSpec` instance from a string, any key-value pair given
inside the key-value brackets overrides any matching parameter given outside the brackets.
When :class:`MatchSpec` attribute values are simple strings, the are interpreted using the
following conventions:
- If the string begins with `^` and ends with `$`, it is converted to a regex.
- If the string contains an asterisk (`*`), it is transformed from a glob to a regex.
- Otherwise, an exact match to the string is sought.
Examples:
>>> str(MatchSpec(name='foo', build='py2*', channel='conda-forge'))
'conda-forge::foo[build=py2*]'
>>> str(MatchSpec('foo 1.0 py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('foo=1.0=py27_0'))
'foo==1.0=py27_0'
>>> str(MatchSpec('conda-forge::foo[version=1.0.*]'))
'conda-forge::foo=1.0'
>>> str(MatchSpec('conda-forge/linux-64::foo>=1.0'))
"conda-forge/linux-64::foo[version='>=1.0']"
>>> str(MatchSpec('*/linux-64::foo>=1.0'))
"foo[subdir=linux-64,version='>=1.0']"
To fully-specify a package with a full, exact spec, the fields
- channel
- subdir
- name
- version
- build
must be given as exact values. In the future, the namespace field will be added to this list.
Alternatively, an exact spec is given by '*[md5=12345678901234567890123456789012]'.
"""
FIELD_NAMES = (
'channel',
'subdir',
'name',
'version',
'build',
'build_number',
'track_features',
'features',
'url',
'md5',
)
def __init__(self, optional=False, target=None, **kwargs):
self.optional = optional
self.target = target
self._match_components = self._build_components(**kwargs)
def get_exact_value(self, field_name):
v = self._match_components.get(field_name)
return v and v.exact_value
def get_raw_value(self, field_name):
v = self._match_components.get(field_name)
return v and v.raw_value
def get(self, field_name, default=None):
v = self.get_raw_value(field_name)
return default if v is None else v
@property
def is_name_only_spec(self):
return (len(self._match_components) == 1
and 'name' in self._match_components
and self.name != '*')
def dist_str(self):
return self.__str__()
def match(self, rec):
"""
        Accepts an `IndexRecord` or a dict; a match can pull from any field
in that record. Returns True for a match, and False for no match.
"""
if isinstance(rec, dict):
rec = PackageRecord.from_objects(rec)
for field_name, v in iteritems(self._match_components):
if not self._match_individual(rec, field_name, v):
return False
return True
def _match_individual(self, record, field_name, match_component):
val = getattr(record, field_name)
try:
return match_component.match(val)
except AttributeError:
return match_component == val
def _is_simple(self):
return len(self._match_components) == 1 and self.get_exact_value('name') is not None
def _is_single(self):
return len(self._match_components) == 1
def _to_filename_do_not_use(self):
# WARNING: this is potentially unreliable and use should probably be limited
# returns None if a filename can't be constructed
fn_field = self.get_exact_value('fn')
if fn_field:
return fn_field
vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
if not any(x is None for x in vals):
return '%s-%s-%s.tar.bz2' % vals
else:
return None
def __repr__(self):
builder = []
builder += ["%s=%r" % (c, self._match_components[c])
for c in self.FIELD_NAMES if c in self._match_components]
if self.optional:
builder.append("optional=True")
if self.target:
builder.append("target=%r" % self.target)
return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))
def __str__(self):
builder = []
brackets = []
channel_matcher = self._match_components.get('channel')
if channel_matcher and channel_matcher.exact_value:
builder.append(text_type(channel_matcher))
elif channel_matcher and not channel_matcher.matches_all:
brackets.append("channel=%s" % text_type(channel_matcher))
subdir_matcher = self._match_components.get('subdir')
if subdir_matcher:
if channel_matcher and channel_matcher.exact_value:
builder.append('/%s' % subdir_matcher)
else:
brackets.append("subdir=%s" % subdir_matcher)
name_matcher = self._match_components.get('name', '*')
builder.append(('::%s' if builder else '%s') % name_matcher)
version_exact = False
version = self._match_components.get('version')
if version:
version = text_type(version)
if any(s in version for s in '><$^|,'):
brackets.append("version='%s'" % version)
elif version.endswith('.*'):
builder.append('=' + version[:-2])
elif version.endswith('*'):
builder.append('=' + version[:-1])
elif version.startswith('=='):
builder.append(version)
version_exact = True
else:
builder.append('==' + version)
version_exact = True
build = self._match_components.get('build')
if build:
build = text_type(build)
if any(s in build for s in '><$^|,'):
brackets.append("build='%s'" % build)
elif '*' in build:
brackets.append("build=%s" % build)
elif version_exact:
builder.append('=' + build)
else:
brackets.append("build=%s" % build)
_skip = ('channel', 'subdir', 'name', 'version', 'build')
for key in self.FIELD_NAMES:
if key not in _skip and key in self._match_components:
if key == 'url' and channel_matcher:
# skip url in canonical str if channel already included
continue
value = text_type(self._match_components[key])
if any(s in value for s in ', ='):
brackets.append("%s='%s'" % (key, value))
else:
brackets.append("%s=%s" % (key, value))
if brackets:
builder.append('[%s]' % ','.join(brackets))
return ''.join(builder)
def __json__(self):
return self.__str__()
def conda_build_form(self):
builder = []
name = self.get_exact_value('name')
assert name
builder.append(name)
build = self.get_raw_value('build')
version = self.get_raw_value('version')
if build:
assert version
builder += [version, build]
elif version:
builder.append(version)
return ' '.join(builder)
def __eq__(self, other):
if isinstance(other, MatchSpec):
self_key = self._match_components, self.optional, self.target
other_key = other._match_components, other.optional, other.target
return self_key == other_key
else:
return False
def __hash__(self):
return hash((self._match_components, self.optional, self.target))
def __contains__(self, field):
return field in self._match_components
@staticmethod
def _build_components(**kwargs):
def _make(field_name, value):
if field_name not in PackageRecord.__fields__:
raise CondaValueError('Cannot match on field %s' % (field_name,))
elif isinstance(value, string_types):
value = text_type(value)
if hasattr(value, 'match'):
matcher = value
elif field_name in _implementors:
matcher = _implementors[field_name](value)
else:
matcher = StrMatch(text_type(value))
return field_name, matcher
return frozendict(_make(key, value) for key, value in iteritems(kwargs))
@property
def name(self):
return self.get_exact_value('name') or '*'
#
# Remaining methods are for back compatibility with conda-build. Do not remove
# without coordination with the conda-build team.
#
@property
def strictness(self):
# With the old MatchSpec, strictness==3 if name, version, and
# build were all specified.
s = sum(f in self._match_components for f in ('name', 'version', 'build'))
if s < len(self._match_components):
return 3
elif not self.get_exact_value('name') or 'build' in self._match_components:
return 3
elif 'version' in self._match_components:
return 2
else:
return 1
@property
def spec(self):
return self.conda_build_form()
@property
def version(self):
# in the old MatchSpec object, version was a VersionSpec, not a str
# so we'll keep that API here
return self._match_components.get('version')
@property
def fn(self):
val = self.get_raw_value('fn') or self.get_raw_value('url')
if val:
val = basename(val)
assert val
return val
@classmethod
def merge(cls, match_specs):
match_specs = tuple(cls(s) for s in match_specs)
grouped = groupby(lambda spec: spec.get_exact_value('name'), match_specs)
dont_merge_these = grouped.pop('*', []) + grouped.pop(None, [])
specs_map = {
name: reduce(lambda x, y: x._merge(y), specs) if len(specs) > 1 else specs[0]
for name, specs in iteritems(grouped)
}
return tuple(concatv(itervalues(specs_map), dont_merge_these))
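    # Illustrative merge behaviour (the merged form of each field is delegated
    # to that component's merge(), e.g. VersionSpec for 'version'):
    #   MatchSpec.merge(['numpy >=1.11', 'numpy <2'])  # -> one combined spec
    #   MatchSpec.merge(['numpy', 'python'])           # -> two untouched specs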
def _merge(self, other):
if self.optional != other.optional or self.target != other.target:
raise ValueError("Incompatible MatchSpec merge: - %s\n - %s" % (self, other))
final_components = {}
component_names = set(self._match_components) | set(other._match_components)
for component_name in component_names:
this_component = self._match_components.get(component_name)
that_component = other._match_components.get(component_name)
if this_component is None and that_component is None:
continue
elif this_component is None:
final_components[component_name] = that_component
elif that_component is None:
final_components[component_name] = this_component
else:
final_components[component_name] = this_component.merge(that_component)
return self.__class__(optional=self.optional, target=self.target, **final_components)
def _parse_version_plus_build(v_plus_b):
"""This should reliably pull the build string out of a version + build string combo.
Examples:
>>> _parse_version_plus_build("=1.2.3 0")
('=1.2.3', '0')
>>> _parse_version_plus_build("1.2.3=0")
('1.2.3', '0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 =py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build("=1.2.3 ")
('=1.2.3', None)
>>> _parse_version_plus_build(">1.8,<2|==1.7")
('>1.8,<2|==1.7', None)
>>> _parse_version_plus_build("* openblas_0")
('*', 'openblas_0')
>>> _parse_version_plus_build("* *")
('*', '*')
"""
parts = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
if parts:
version, build = parts.groups()
build = build and build.strip()
else:
version, build = v_plus_b, None
return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
"""
Examples:
>>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
('_license', '1.1', 'py27_1')
>>> _parse_legacy_dist("_license-1.1-py27_1")
('_license', '1.1', 'py27_1')
"""
if dist_str.endswith(CONDA_TARBALL_EXTENSION):
dist_str = dist_str[:-len(CONDA_TARBALL_EXTENSION)]
name, version, build = dist_str.rsplit('-', 2)
return name, version, build
def _parse_channel(channel_val):
if not channel_val:
return None, None
chn = Channel(channel_val)
channel_name = chn.name
return channel_name, chn.subdir
def _parse_spec_str(spec_str):
# pre-step for ugly backward compat
if spec_str.endswith('@'):
feature_name = spec_str[:-1]
return {
'name': '*',
'track_features': (feature_name,),
}
# Step 1. strip '#' comment
if '#' in spec_str:
ndx = spec_str.index('#')
spec_str, _ = spec_str[:ndx], spec_str[ndx:]
spec_str.strip()
# Step 2. done if spec_str is a tarball
if spec_str.endswith(CONDA_TARBALL_EXTENSION):
# treat as a normal url
if not is_url(spec_str):
spec_str = unquote(path_to_url(expand(spec_str)))
channel = Channel(spec_str)
if channel.subdir:
name, version, build = _parse_legacy_dist(channel.package_filename)
result = {
'channel': channel.canonical_name,
'subdir': channel.subdir,
'name': name,
'version': version,
'build': build,
'fn': channel.package_filename,
'url': spec_str,
}
else:
# url is not a channel
return {
'name': '*',
'fn': basename(spec_str),
'url': spec_str,
}
return result
# Step 3. strip off brackets portion
brackets = {}
m3 = re.match(r'.*(?:(\[.*\]))', spec_str)
if m3:
brackets_str = m3.groups()[0]
spec_str = spec_str.replace(brackets_str, '')
brackets_str = brackets_str[1:-1]
m3b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
for match in m3b:
key, _, value, _ = match.groups()
if not key or not value:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
brackets[key] = value
# Step 4. strip off parens portion
m4 = re.match(r'.*(?:(\(.*\)))', spec_str)
parens = {}
if m4:
parens_str = m4.groups()[0]
spec_str = spec_str.replace(parens_str, '')
parens_str = parens_str[1:-1]
m4b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', parens_str)
for match in m4b:
key, _, value, _ = match.groups()
parens[key] = value
if 'optional' in parens_str:
parens['optional'] = True
# Step 5. strip off '::' channel and namespace
m5 = spec_str.rsplit(':', 2)
m5_len = len(m5)
if m5_len == 3:
channel_str, namespace, spec_str = m5
elif m5_len == 2:
namespace, spec_str = m5
channel_str = None
elif m5_len:
spec_str = m5[0]
channel_str, namespace = None, None
else:
raise NotImplementedError()
channel, subdir = _parse_channel(channel_str)
if 'channel' in brackets:
b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
if b_channel:
channel = b_channel
if b_subdir:
subdir = b_subdir
if 'subdir' in brackets:
subdir = brackets.pop('subdir')
# Step 6. strip off package name from remaining version + build
m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
if m3:
name, spec_str = m3.groups()
if name is None:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
else:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
# Step 7. otherwise sort out version + build
spec_str = spec_str and spec_str.strip()
# This was an attempt to make MatchSpec('numpy-1.11.0-py27_0') work like we'd want. It's
# not possible though because plenty of packages have names with more than one '-'.
# if spec_str is None and name.count('-') >= 2:
# name, version, build = _parse_legacy_dist(name)
if spec_str:
if '[' in spec_str:
raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
version, build = _parse_version_plus_build(spec_str)
# translate version '=1.2.3' to '1.2.3*'
# is it a simple version starting with '='? i.e. '=1.2.3'
if version.startswith('='):
test_str = version[1:]
if version.startswith('==') and build is None:
version = version[2:]
elif not any(c in test_str for c in "=,|"):
if build is None and not test_str.endswith('*'):
version = test_str + '*'
else:
version = test_str
else:
version, build = None, None
# Step 8. now compile components together
components = {}
components['name'] = name if name else '*'
if channel is not None:
components['channel'] = channel
if subdir is not None:
components['subdir'] = subdir
if namespace is not None:
# components['namespace'] = namespace
pass
if version is not None:
components['version'] = version
if build is not None:
components['build'] = build
# anything in brackets will now strictly override key as set in other area of spec str
components.update(brackets)
return components
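# Illustrative results (derived from the parsing steps above; not exhaustive):
#   _parse_spec_str("conda-forge::numpy=1.11")
#     -> {'name': 'numpy', 'version': '1.11*', 'channel': 'conda-forge'}
#   _parse_spec_str("foo 1.0 py27_0")
#     -> {'name': 'foo', 'version': '1.0', 'build': 'py27_0'}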
@with_metaclass(ABCMeta)
class MatchInterface(object):
def __init__(self, value):
self._raw_value = value
@abstractmethod
def match(self, other):
raise NotImplementedError()
def matches(self, value):
return self.match(value)
@property
def raw_value(self):
return self._raw_value
@abstractproperty
def exact_value(self):
"""If the match value is an exact specification, returns the value.
Otherwise returns None.
"""
raise NotImplementedError()
@abstractmethod
def merge(self, other):
raise NotImplementedError()
class SplitStrMatch(MatchInterface):
__slots__ = '_raw_value',
def __init__(self, value):
super(SplitStrMatch, self).__init__(self._convert(value))
def _convert(self, value):
try:
return frozenset(value.replace(' ', ',').split(','))
except AttributeError:
if isiterable(value):
return frozenset(value)
raise
def match(self, other):
try:
return other and self._raw_value & other._raw_value
except AttributeError:
return self._raw_value & self._convert(other)
def __repr__(self):
if self._raw_value:
return "{%s}" % ', '.join("'%s'" % s for s in sorted(self._raw_value))
else:
return 'set()'
def __str__(self):
# this space delimiting makes me nauseous
return ' '.join(sorted(self._raw_value))
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class FeatureMatch(MatchInterface):
__slots__ = '_raw_value',
def __init__(self, value):
super(FeatureMatch, self).__init__(self._convert(value))
def _convert(self, value):
if not value:
return frozenset()
elif isinstance(value, string_types):
return frozenset(f for f in (
ff.strip() for ff in value.replace(' ', ',').split(',')
) if f)
else:
return frozenset(f for f in (ff.strip() for ff in value) if f)
def match(self, other):
other = self._convert(other)
return self._raw_value == other
def __repr__(self):
return "[%s]" % ', '.join("'%s'" % k for k in sorted(self._raw_value))
def __str__(self):
return ' '.join(sorted(self._raw_value))
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class StrMatch(MatchInterface):
__slots__ = '_raw_value', '_re_match'
def __init__(self, value):
super(StrMatch, self).__init__(value)
self._re_match = None
if value.startswith('^') and value.endswith('$'):
self._re_match = re.compile(value).match
elif '*' in value:
self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
def match(self, other):
try:
_other_val = other._raw_value
except AttributeError:
_other_val = text_type(other)
if self._re_match:
return self._re_match(_other_val)
else:
return self._raw_value == _other_val
def __str__(self):
return self._raw_value
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._raw_value)
def __eq__(self, other):
return isinstance(other, self.__class__) and self._raw_value == other._raw_value
def __hash__(self):
return hash(self._raw_value)
@property
def exact_value(self):
return self._raw_value if self._re_match is None else None
@property
def matches_all(self):
return self._raw_value == '*'
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
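# Illustrative behaviour (sketch, derived from the class above):
#     StrMatch('openssl').match('openssl')   -> True   (exact comparison)
#     StrMatch('open*').match('openssl')     -> truthy (compiled glob match)
#     StrMatch('^py.*$').match('python')     -> truthy (explicit regex)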
class ChannelMatch(StrMatch):
def __init__(self, value):
self._re_match = None
if isinstance(value, string_types):
if value.startswith('^') and value.endswith('$'):
self._re_match = re.compile(value).match
elif '*' in value:
self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
else:
value = Channel(value)
super(StrMatch, self).__init__(value) # lgtm [py/super-not-enclosing-class]
def match(self, other):
try:
_other_val = Channel(other._raw_value)
except AttributeError:
_other_val = Channel(other)
if self._re_match:
return self._re_match(_other_val.canonical_name)
else:
# assert ChannelMatch('pkgs/free').match('defaults') is False
# assert ChannelMatch('defaults').match('pkgs/free') is True
return (self._raw_value.name == _other_val.name
or self._raw_value.name == _other_val.canonical_name)
def __str__(self):
try:
return "%s" % self._raw_value.name
except AttributeError:
return "%s" % self._raw_value
def __repr__(self):
return "'%s'" % self.__str__()
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError("Incompatible component merge:\n - %r\n - %r"
% (self.raw_value, other.raw_value))
return self.raw_value
class LowerStrMatch(StrMatch):
def __init__(self, value):
super(LowerStrMatch, self).__init__(value.lower())
_implementors = {
'name': LowerStrMatch,
'track_features': FeatureMatch,
'features': FeatureMatch,
'version': VersionSpec,
'build_number': BuildNumberMatch,
'channel': ChannelMatch,
}
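# Illustrative lookup (sketch): each parsed component is wrapped by its matcher
# class from this table, e.g. _implementors['name']('NumPy') builds a
# LowerStrMatch that compares case-insensitively.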
| apache-2.0 | -8,014,581,032,672,361,000 | 35.048276 | 99 | 0.568554 | false |
open-austin/capture | distance.py | 1 | 4386 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import argparse
import glob
import os
import numpy as np
import pandas as pd
from geopy.distance import vincenty as point_distance
def ingest(fn, route_id, begin_latlng, end_latlng):
df = pd.read_csv(fn, parse_dates=['timestamp'])
df = df.drop(['speed', 'trip_headsign'], axis=1)
df = df[df.route_id == route_id]
df['begin_distances'] = compute_distance(df, begin_latlng)
df['end_distances'] = compute_distance(df, end_latlng)
return df
def compute_distance(df, latlng):
df = df.copy()
starts = zip(df.latitude, df.longitude)
return [point_distance(latlng, s).meters for s in starts]
def parse_duration(df):
'''
for each trip id
choose a reading nearest the begin stop
choose a reading nearest downtown
subtract the times for those two readings
positive is southbound
'''
mins = df.groupby('trip_id').idxmin()
begin_mins = df.loc[mins.begin_distances].set_index('trip_id')
end_mins = df.loc[mins.end_distances].set_index('trip_id')
unneeded_cols = ['begin_distances', 'end_distances', 'latitude', 'longitude']
begin_mins.drop(unneeded_cols, axis=1, inplace=True)
end_mins.drop(['vehicle_id', 'route_id'] + unneeded_cols, axis=1, inplace=True)
    # suffix the overlapping 'timestamp' column by side: the left frame holds
    # the begin-stop reading, the right frame the downtown reading
    result = begin_mins.join(end_mins, lsuffix='_begin', rsuffix='_end')
duration = begin_mins.timestamp - end_mins.timestamp
result['duration'] = duration / np.timedelta64(1, 's')
return result
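# Worked example (sketch): a trip whose reading nearest the begin stop is at
# 08:00:05 and nearest downtown at 08:14:35 yields
# duration = -870.0 seconds (begin timestamp minus downtown timestamp).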
def parse_duration_by_hour(df):
df['duration_abs'] = df['duration'].abs()
df['hour'] = df['timestamp_begin'].apply(
lambda x: x.tz_localize('UTC').tz_convert('US/Central').hour
)
df_byhour = df.groupby('hour')
results = pd.concat([
df_byhour['duration_abs'].count(),
df_byhour['duration_abs'].mean()
], axis=1, keys=['count', 'mean'])
return results.reindex(index=range(0, 24))
def parse(capmetrics_path=None, leglob=None, route_id=None, begin_lat=None, begin_lon=None, end_lat=None, end_lon=None, name=None):
df_total = pd.DataFrame()
data_glob = os.path.join(capmetrics_path, 'data', 'vehicle_positions', leglob)
files = glob.glob(data_glob)
for i, fname in enumerate(files):
print('({}/{}) Ingesting {}'.format(i + 1, len(files), fname))
try:
df_ingested = ingest(fname, route_id, (begin_lat, begin_lon), (end_lat, end_lon))
df_duration = parse_duration(df_ingested)
df_total = pd.concat([df_total, df_duration])
except Exception as e:
print(e)
print('Skipping ', fname)
if df_total.empty:
print('No vehicle positions found')
return
    return parse_duration_by_hour(df_total)
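# Example invocation (hypothetical paths and coordinates, for illustration):
#   python distance.py --capmetrics_path ~/capmetrics --glob '2016-01-*.csv' \
#       --name lamar --route_id 801 --begin_lat 30.35 --begin_lon -97.73 \
#       --end_lat 30.27 --end_lon -97.74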
def main():
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('--capmetrics_path', help='Path to the capmetrics directory', required=True, type=str)
parser.add_argument('--glob', help='Glob of vehicle positions CSV files', required=True, type=str)
parser.add_argument('--name', help='Name of the output file', required=True, type=str)
parser.add_argument('--route_id', help='Route ID', required=True, type=int)
parser.add_argument('--begin_lat', help='Latitude of first stop', required=True, type=float)
parser.add_argument('--begin_lon', help='Longitude of first stop', required=True, type=float)
parser.add_argument('--end_lat', help='Latitude of second stop', required=True, type=float)
parser.add_argument('--end_lon', help='Longitude of second stop', required=True, type=float)
args = parser.parse_args()
results = parse(
capmetrics_path=args.capmetrics_path,
name=args.name,
leglob=args.glob,
route_id=args.route_id,
begin_lat=args.begin_lat,
begin_lon=args.begin_lon,
end_lat=args.end_lat,
end_lon=args.end_lon
)
output_filename = '{route_id}_{name}_{glob}'.format(route_id=args.route_id, glob=args.glob, name=args.name)
output_path_duration_by_hour = 'results/duration_by_hour/{}.csv'.format(output_filename)
results.to_csv(output_path_duration_by_hour, header=True, sep='\t')
print('Saved duration by hour to {}'.format(output_path_duration_by_hour))
if __name__ == '__main__':
main()
| gpl-3.0 | -4,384,529,574,560,891,400 | 35.247934 | 131 | 0.649111 | false |
borg-project/borg | borg/unix/accounting.py | 1 | 8038 | """@author: Bryan Silverthorn <[email protected]>"""
import os
import select
import signal
import datetime
import collections
import borg
log = borg.get_logger(__name__)
class SessionTimeAccountant(object):
"""
Track the total CPU (user) time for members of a session.
Process accounting under Linux is a giant pain, especially without root
access. In the general case, it is literally impossible (without patching
the kernel or some such craziness). Whatever. We do our best. Slightly
fancier schemes are available, but they're not much fancier---they're
mostly good only at making it harder for processes to actively evade being
charged. For primarily long-running processes that act in good faith, we
should do ok.
"""
def __init__(self, sid):
"""
Initialize.
"""
self.sid = sid
self.charged = {}
def audit(self):
"""
Update estimates.
"""
for p in borg.unix.proc.ProcessStat.in_session(self.sid):
self.charged[p.pid] = p.user_time
@property
def total(self):
"""
Return estimated total.
"""
return sum(self.charged.values(), datetime.timedelta())
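# Illustrative use (sketch):
#     accountant = SessionTimeAccountant(session_leader_pid)
#     accountant.audit()               # sample /proc for session members
#     elapsed = accountant.total       # summed user time as a timedelta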
class PollingReader(object):
"""
Read from file descriptors with timeout.
"""
def __init__(self, fds):
"""
Initialize.
"""
self.fds = fds
self.polling = select.poll()
for fd in fds:
self.polling.register(fd, select.POLLIN)
def unregister(self, fds):
"""
Unregister descriptors.
"""
for fd in fds:
self.polling.unregister(fd)
self.fds.remove(fd)
def read(self, timeout = -1):
"""
Read with an optional timeout.
"""
changed = self.polling.poll(timeout * 1000)
for (fd, event) in changed:
log.debug("event on fd %i is %#o", fd, event)
if event & select.POLLIN:
# POLLHUP is level-triggered; we'll be back if it was missed
return (fd, os.read(fd, 65536))
elif event & select.POLLHUP:
return (fd, "")
else:
raise IOError("unexpected poll response %#o from file descriptor" % event)
return (None, None)
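# Illustrative use (sketch, matching the read() contract above):
#     reader = PollingReader([child.stdout.fileno()])
#     (fd, data) = reader.read(0.5)    # (None, None) on timeout, "" on EOF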
CPU_LimitedRun = \
collections.namedtuple(
"CPU_LimitedRun",
[
"started",
"limit",
"out_chunks",
"err_chunks",
"usage_elapsed",
"proc_elapsed",
"exit_status",
"exit_signal",
],
)
def run_cpu_limited(
arguments,
limit,
pty = True,
environment = {},
resolution = 0.5,
):
"""
Spawn a subprocess whose process tree is granted limited CPU (user) time.
@param environment Override specific existing environment variables.
The subprocess must not expect input. This method is best suited to
processes which may run for a reasonable amount of time (eg, at least
several seconds); it will be fairly inefficient (and ineffective) at
fine-grained limiting of CPU allocation to short-duration processes.
We run the process and read its output. Every time we receive a chunk of
data, or every C{resolution} seconds, we estimate the total CPU time used
by the session---and store that information with the chunk of output, if
any. After at least C{limit} of CPU time has been used by the spawned
session, or after the session leader terminates, whichever is first, the
session is (sig)killed, the session leader waited on, and any data
remaining in the pipe is read.
Note that the use of SIGKILL means that child processes *cannot* perform
their own cleanup.
If C{pty} is specified, process stdout is piped through a pty, which makes
process output less likely to be buffered. This behavior is the default.
Kernel-reported resource usage includes the sum of all directly and
indirectly waited-on children. It will be accurate in the common case where
processes terminate after correctly waiting on their children, and
inaccurate in cases where zombies are reparented to init. Elapsed CPU time
taken from the /proc accounting mechanism is used to do CPU time limiting,
and will always be at least the specified limit.
"""
log.detail("running %s for %s", arguments, limit)
# sanity
if not arguments:
raise ValueError()
# start the run
popened = None
fd_chunks = {}
exit_pid = None
started = datetime.datetime.utcnow()
try:
# start running the child process
if pty:
popened = borg.unix.sessions.spawn_pty_session(arguments, environment)
else:
popened = borg.unix.sessions.spawn_pipe_session(arguments, environment)
fd_chunks = {
popened.stdout.fileno(): [],
popened.stderr.fileno(): [],
}
log.debug("spawned child with pid %i", popened.pid)
# read the child's output while accounting (note that the session id
# is, under Linux, the pid of the session leader)
accountant = SessionTimeAccountant(popened.pid)
reader = PollingReader(fd_chunks.keys())
while reader.fds:
# nuke if we're past cutoff
if accountant.total >= limit:
popened.kill()
break
# read from and audit the child process
(chunk_fd, chunk) = reader.read(resolution)
accountant.audit()
if chunk is not None:
log.debug(
"got %i bytes at %s (user time) on fd %i; chunk follows:\n%s",
len(chunk),
accountant.total,
chunk_fd,
chunk,
)
if chunk != "":
fd_chunks[chunk_fd].append((accountant.total, chunk))
else:
reader.unregister([chunk_fd])
# wait for our child to die
(exit_pid, termination, usage) = os.wait4(popened.pid, 0)
# nuke the session from orbit (it's the only way to be sure)
borg.unix.sessions.kill_session(popened.pid, signal.SIGKILL)
except:
# something has gone awry, so we need to kill our children
log.warning("something went awry! (our pid is %i)", os.getpid())
raised = borg.util.Raised()
if exit_pid is None and popened is not None:
try:
# nuke the entire session
borg.unix.sessions.kill_session(popened.pid, signal.SIGKILL)
# and don't leave the child as a zombie
os.waitpid(popened.pid, 0)
except:
borg.util.Raised().print_ignored()
raised.re_raise()
else:
# grab any output left in the kernel buffers
while reader.fds:
(chunk_fd, chunk) = reader.read(128.0)
if chunk:
fd_chunks[chunk_fd].append((accountant.total, chunk))
elif chunk_fd:
reader.unregister([chunk_fd])
else:
raise RuntimeError("final read from child timed out; undead child?")
# done
from datetime import timedelta
return \
CPU_LimitedRun(
started,
limit,
fd_chunks[popened.stdout.fileno()],
fd_chunks[popened.stderr.fileno()],
timedelta(seconds = usage.ru_utime),
accountant.total,
os.WEXITSTATUS(termination) if os.WIFEXITED(termination) else None,
os.WTERMSIG(termination) if os.WIFSIGNALED(termination) else None,
)
finally:
# let's not leak file descriptors
if popened is not None:
popened.stdout.close()
popened.stderr.close()
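# Illustrative call (sketch; solver path and limit are hypothetical):
#     run = run_cpu_limited(["./solver", "input.cnf"],
#                           datetime.timedelta(seconds=60))
#     print run.exit_status, run.proc_elapsed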
| mit | -8,009,201,627,987,073,000 | 30.155039 | 90 | 0.581488 | false |
arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HDFS/package/scripts/params.py | 1 | 1253 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons import OSCheck
from resource_management.libraries.functions.default import default
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
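# Script (used below) is provided transitively by the params_* wildcard
# imports above (assumption based on the surrounding Ambari stack scripts).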
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
retryAble = default("/commandParams/command_retry_enabled", False)
script_https_protocol = Script.get_force_https_protocol_name()
| apache-2.0 | 1,570,136,230,896,415,200 | 40.766667 | 83 | 0.7917 | false |
pudo/archivekit | archivekit/store/s3.py | 1 | 4834 | import os
from urllib2 import urlopen
from boto.s3.connection import S3Connection, S3ResponseError
from boto.s3.connection import Location
from archivekit.store.common import Store, StoreObject, MANIFEST
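# os.path.join(' ', ' ').strip() evaluates to the platform path separator
# ('/' on POSIX); keys below are namespaced as <prefix>/collection/package/path.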
DELIM = os.path.join(' ', ' ').strip()
ALL_USERS = 'http://acs.amazonaws.com/groups/global/AllUsers'
class S3Store(Store):
def __init__(self, aws_key_id=None, aws_secret=None, bucket_name=None,
prefix=None, location=Location.EU, **kwargs):
if aws_key_id is None:
aws_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret = os.environ.get('AWS_SECRET_ACCESS_KEY')
self.aws_key_id = aws_key_id
self.aws_secret = aws_secret
if bucket_name is None:
bucket_name = os.environ.get('AWS_BUCKET_NAME')
self.bucket_name = bucket_name
self.prefix = prefix
self.location = location
self._bucket = None
@property
def bucket(self):
if self._bucket is None:
self.conn = S3Connection(self.aws_key_id, self.aws_secret)
try:
self._bucket = self.conn.get_bucket(self.bucket_name)
except S3ResponseError, se:
if se.status != 404:
raise
self._bucket = self.conn.create_bucket(self.bucket_name,
location=self.location)
return self._bucket
def get_object(self, collection, package_id, path):
return S3StoreObject(self, collection, package_id, path)
def _get_prefix(self, collection):
prefix = collection
if self.prefix:
prefix = os.path.join(self.prefix, prefix)
return os.path.join(prefix, '')
def list_collections(self):
prefix = os.path.join(self.prefix, '') if self.prefix else None
for prefix in self.bucket.list(prefix=prefix, delimiter=DELIM):
yield prefix.name.rsplit(DELIM, 2)[-2]
def list_packages(self, collection):
prefix = self._get_prefix(collection)
for sub_prefix in self.bucket.list(prefix=prefix, delimiter=DELIM):
yield sub_prefix.name.rsplit(DELIM, 2)[-2]
def list_resources(self, collection, package_id):
prefix = os.path.join(self._get_prefix(collection), package_id)
skip = os.path.join(prefix, MANIFEST)
offset = len(skip) - len(MANIFEST)
for key in self.bucket.get_all_keys(prefix=prefix):
if key.name == skip:
continue
yield key.name[offset:]
def __repr__(self):
return '<S3Store(%r, %r)>' % (self.bucket_name, self.prefix)
def __unicode__(self):
return os.path.join(self.bucket_name, self.prefix)
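# Illustrative use (sketch; bucket name and credentials are hypothetical):
#     store = S3Store(bucket_name='my-archive', prefix='prod')
#     for collection in store.list_collections():
#         print collection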
class S3StoreObject(StoreObject):
def __init__(self, store, collection, package_id, path):
self.store = store
self.package_id = package_id
self.path = path
self._key = None
self._key_name = os.path.join(collection, package_id, path)
if store.prefix:
self._key_name = os.path.join(store.prefix, self._key_name)
@property
def key(self):
if self._key is None:
self._key = self.store.bucket.get_key(self._key_name)
if self._key is None:
self._key = self.store.bucket.new_key(self._key_name)
return self._key
def exists(self):
if self._key is None:
self._key = self.store.bucket.get_key(self._key_name)
return self._key is not None
def save_fileobj(self, fileobj):
self.key.set_contents_from_file(fileobj)
def save_file(self, file_name, destructive=False):
with open(file_name, 'rb') as fh:
self.save_fileobj(fh)
def save_data(self, data):
self.key.set_contents_from_string(data)
def load_fileobj(self):
return urlopen(self.public_url())
def load_data(self):
return self.key.get_contents_as_string()
def _is_public(self):
try:
for grant in self.key.get_acl().acl.grants:
if grant.permission == 'READ':
if grant.uri == ALL_USERS:
return True
except:
pass
return False
def public_url(self):
if not self.exists():
return
# Welcome to the world of open data:
if not self._is_public():
self.key.make_public()
return self.key.generate_url(expires_in=0,
force_http=True,
query_auth=False)
def __repr__(self):
return '<S3StoreObject(%r, %r, %r)>' % (self.store, self.package_id,
self.path)
def __unicode__(self):
return self.public_url()
| mit | -4,507,298,742,891,452,000 | 33.042254 | 78 | 0.571369 | false |
DonHilborn/DataGenerator | faker/providers/es_MX/company.py | 1 | 8627 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from ..company import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{last_name}}-{{last_name}}',
'{{company_prefix}} {{last_name}}-{{last_name}}',
'{{company_prefix}} {{last_name}} y {{last_name}}',
'{{company_prefix}} {{last_name}}, {{last_name}} y {{last_name}}',
'{{last_name}}-{{last_name}} {{company_suffix}}',
'{{last_name}}, {{last_name}} y {{last_name}}',
'{{last_name}} y {{last_name}} {{company_suffix}}'
)
catch_phrase_words = (
(
"habilidad", "acceso", "adaptador", "algoritmo", "alianza",
"analista", "aplicación", "enfoque", "arquitectura",
"archivo", "inteligencia artificial", "array", "actitud",
"medición", "gestión presupuestaria", "capacidad", "desafío",
"circuito", "colaboración", "complejidad", "concepto",
"conglomeración", "contingencia", "núcleo", "fidelidad",
"base de datos", "data-warehouse", "definición", "emulación",
"codificar", "encriptar", "extranet", "firmware",
"flexibilidad", "focus group", "previsión", "base de trabajo",
"función", "funcionalidad", "Interfaz Gráfica", "groupware",
"Interfaz gráfico de usuario", "hardware", "Soporte", "jerarquía",
"conjunto", "implementación", "infraestructura", "iniciativa",
"instalación", "conjunto de instrucciones", "interfaz",
"intranet", "base del conocimiento", "red de area local",
"aprovechar", "matrices", "metodologías", "middleware",
"migración", "modelo", "moderador", "monitorizar",
"arquitectura abierta", "sistema abierto", "orquestar",
"paradigma", "paralelismo", "política", "portal",
"estructura de precios", "proceso de mejora",
"producto", "productividad", "proyecto", "proyección",
"protocolo", "línea segura", "software", "solución",
"estandardización", "estrategia", "estructura", "éxito",
"superestructura", "soporte", "sinergia", "mediante",
"marco de tiempo", "caja de herramientas", "utilización",
"website", "fuerza de trabajo"),
(
"24 horas", "24/7", "3ra generación", "4ta generación",
"5ta generación", "6ta generación", "analizada",
"asimétrica", "asíncrona", "monitorizada por red",
"bidireccional", "bifurcada", "generada por el cliente",
"cliente servidor", "coherente", "cohesiva", "compuesto",
"sensible al contexto", "basado en el contexto",
"basado en contenido", "dedicada",
"generado por la demanda", "didactica", "direccional",
"discreta", "dinámica", "potenciada", "acompasada",
"ejecutiva", "explícita", "tolerante a fallos",
"innovadora", "amplio ábanico", "global", "heurística",
"alto nivel", "holística", "homogénea", "híbrida",
"incremental", "intangible", "interactiva", "intermedia",
"local", "logística", "maximizada", "metódica",
"misión crítica", "móbil", "modular", "motivadora",
"multimedia", "multiestado", "multitarea", "nacional",
"basado en necesidades", "neutral", "nueva generación",
"no-volátil", "orientado a objetos", "óptima", "optimizada",
"radical", "tiempo real", "recíproca", "regional",
"escalable", "secundaria", "orientada a soluciones",
"estable", "estatica", "sistemática", "sistémica",
"tangible", "terciaria", "transicional", "uniforme",
"valor añadido", "vía web", "defectos cero", "tolerancia cero"
),
(
'adaptivo', 'avanzado', 'asimilado', 'automatizado',
'balanceado', 'enfocado al negocio',
'centralizado', 'clonado', 'compatible', 'configurable',
'multiplataforma', 'enfocado al cliente', 'personalizable',
'descentralizado', 'digitizado', 'distribuido', 'diverso',
'mejorado', 'en toda la empresa', 'ergonómico', 'exclusivo',
'expandido', 'extendido', 'cara a cara', 'enfocado',
'de primera línea', 'totalmente configurable',
'basado en funcionalidad', 'fundamental', 'horizontal',
'implementado', 'innovador', 'integrado', 'intuitivo',
'inverso', 'administrado', 'mandatorio', 'monitoreado',
'multicanal', 'multilateral', 'multi-capas', 'en red',
'basado en objetos', 'de arquitectura abierta',
'Open-source', 'operativo', 'optimizado', 'opcional',
'orgánico', 'organizado', 'perseverante', 'persistente',
'polarizado', 'preventivo', 'proactivo', 'enfocado a ganancias',
'programable', 'progresivo', 'llave pública',
'enfocado a la calidad', 'reactivo', 'realineado',
're-contextualizado', 'reducido', 'con ingeniería inversa',
'de tamaño adecuado', 'robusto', 'seguro', 'compartible',
'sincronizado', 'orientado a equipos', 'total',
'universal', 'actualizable', 'centrado al usuario',
'versátil', 'virtual', 'visionario',
)
)
bsWords = (
(
'implementa', 'utiliza', 'integrata', 'optimiza',
'evoluciona', 'transforma', 'abraza', 'habilia',
'orquesta', 'reinventa', 'agrega', 'mejora', 'incentiviza',
'modifica', 'empondera', 'monetiza', 'fortalece',
'facilita', 'synergiza', 'crear marca', 'crece',
'sintetiza', 'entrega', 'mezcla', 'incuba', 'compromete',
'maximiza', 'inmediata', 'visualiza', 'inova',
'escala', 'libera', 'maneja', 'extiende', 'revoluciona',
'genera', 'explota', 'transición', 'itera', 'cultiva',
'redefine', 'recontextualiza',
),
(
'synergías', 'paradigmas', 'marcados', 'socios',
'infraestructuras', 'plataformas', 'iniciativas',
'chanales', 'communidades', 'ROI', 'soluciones',
'portales', 'nichos', 'tecnologías', 'contenido',
'cadena de producción', 'convergencia', 'relaciones',
'architecturas', 'interfaces', 'comercio electrónico',
'sistemas', 'ancho de banda', 'modelos', 'entregables',
'usuarios', 'esquemas', 'redes', 'aplicaciones', 'métricas',
'funcionalidades', 'experiencias', 'servicios web',
'metodologías'
),
(
'valor agregado', 'verticales', 'proactivas', 'robustas',
'revolucionarias', 'escalables', 'de punta', 'innovadoras',
'intuitivas', 'estratégicas', 'e-business', 'de misión crítica',
'uno-a-uno', '24/7', 'end-to-end', 'globales', 'B2B', 'B2C',
'granulares', 'sin fricciones', 'virtuales', 'virales',
'dinámicas', '24/365', 'magnéticas', 'listo para la web',
'interactivas', 'dot-com', 'sexi', 'en tiempo real',
'eficientes', 'front-end', 'distribuidas', 'extensibles',
'llave en mano', 'de clase mundial', 'open-source',
'plataforma cruzada', 'de paquete', 'empresariales',
'integrado', 'impacto total', 'inalámbrica', 'transparentes',
'de siguiente generación', 'lo último', 'centrado al usuario',
'visionarias', 'personalizado', 'ubicuas', 'plug-and-play',
'colaborativas', 'holísticas', 'ricas'
),
)
company_preffixes = ('Despacho', 'Grupo', 'Corporativo', 'Club',
'Industrias', 'Laboratorios', 'Proyectos')
company_suffixes = ('A.C.', 'S.A.', 'S.A. de C.V.', 'S.C.',
                        'S. R.L. de C.V.', 'e Hijos', 'y Asociados')
def company_prefix(self):
"""
Ejemplo: Grupo
"""
return self.random_element(self.company_preffixes)
def catch_phrase(self):
"""
:example 'Robust full-range hub'
"""
result = []
for word_list in self.catch_phrase_words:
result.append(self.random_element(word_list))
return " ".join(result)
def bs(self):
"""
:example 'integrate extensible convergence'
"""
result = []
for word_list in self.bsWords:
result.append(self.random_element(word_list))
return " ".join(result)
| mit | -2,724,498,202,587,713,500 | 49.311765 | 77 | 0.559102 | false |
chrys87/orca-beep | src/orca/generator.py | 1 | 43334 | # Orca
#
# Copyright 2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Superclass of classes used to generate presentations for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2009 Sun Microsystems Inc."
__license__ = "LGPL"
import sys
import time
import traceback
import pyatspi
from . import braille
from . import debug
from . import messages
from . import object_properties
from . import settings
import collections
def _formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.args
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]
# The prefix to use for the individual generator methods
#
METHOD_PREFIX = "_generate"
class Generator:
"""Takes accessible objects and generates a presentation for those
objects. See the generate method, which is the primary entry
point."""
# pylint: disable-msg=W0142
def __init__(self, script, mode):
# pylint: disable-msg=W0108
self._mode = mode
self._script = script
self._methodsDict = {}
        candidates = [getattr(self, y).__get__(self, self.__class__)
                      for y in dir(self) if y.startswith(METHOD_PREFIX)]
        for method in [z for z in candidates
                       if isinstance(z, collections.Callable)]:
name = method.__name__[len(METHOD_PREFIX):]
name = name[0].lower() + name[1:]
self._methodsDict[name] = method
self._verifyFormatting()
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
globalsDict['obj'] = None
globalsDict['role'] = None
globalsDict['pyatspi'] = pyatspi
def _verifyFormatting(self):
# Verify the formatting strings are OK. This is only
# for verification and does not effect the function of
# Orca at all.
# Populate the entire globals with empty arrays
# for the results of all the legal method names.
#
globalsDict = {}
for key in list(self._methodsDict.keys()):
globalsDict[key] = []
self._addGlobals(globalsDict)
for roleKey in self._script.formatting[self._mode]:
for key in ["focused", "unfocused"]:
try:
evalString = \
self._script.formatting[self._mode][roleKey][key]
except:
continue
else:
if not evalString:
# It's legal to have an empty string.
#
continue
while True:
try:
eval(evalString, globalsDict)
break
except NameError:
info = _formatExceptionInfo()
arg = info[1][0]
arg = arg.replace("name '", "")
arg = arg.replace("' is not defined", "")
if arg not in self._methodsDict:
debug.printException(debug.LEVEL_SEVERE)
globalsDict[arg] = []
except:
debug.printException(debug.LEVEL_SEVERE)
break
def _overrideRole(self, newRole, args):
"""Convenience method to allow you to temporarily override the role in
        the args dictionary. This changes the role in args and
        returns the old role so you can pass it back to _restoreRole.
"""
oldRole = args.get('role', None)
args['role'] = newRole
return oldRole
def _restoreRole(self, oldRole, args):
"""Convenience method to restore the old role back in the args
dictionary. The oldRole should have been obtained from
_overrideRole. If oldRole is None, then the 'role' key/value
pair will be deleted from args.
"""
if oldRole:
args['role'] = oldRole
else:
del args['role']
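    # Typical pattern for the two helpers above (sketch; the same pattern is
    # used by the table-cell generators further down):
    #     oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX, args)
    #     result.extend(self.generate(obj, **args))
    #     self._restoreRole(oldRole, args)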
def generateContents(self, contents, **args):
return []
def generate(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the complete presentation for the
        object. The presentation to be generated depends highly upon the
formatting strings in formatting.py.
args is a dictionary that may contain any of the following:
- alreadyFocused: if True, we're getting an object
that previously had focus
- priorObj: if set, represents the object that had focus before
this object
- includeContext: boolean (default=True) which says whether
the context for an object should be included as a prefix
and suffix
- role: a role to override the object's role
- formatType: the type of formatting, such as
'focused', 'basicWhereAmI', etc.
- forceMnemonic: boolean (default=False) which says if we
should ignore the settings.enableMnemonicSpeaking setting
- forceTutorial: boolean (default=False) which says if we
should force a tutorial to be spoken or not
"""
startTime = time.time()
result = []
globalsDict = {}
self._addGlobals(globalsDict)
globalsDict['obj'] = obj
try:
globalsDict['role'] = args.get('role', obj.getRole())
except:
msg = 'ERROR: Cannot generate presentation for: %s. Aborting' % obj
debug.println(debug.LEVEL_INFO, msg, True)
return result
try:
# We sometimes want to override the role. We'll keep the
# role in the args dictionary as a means to let us do so.
#
args['role'] = globalsDict['role']
# We loop through the format string, catching each error
# as we go. Each error should always be a NameError,
# where the name is the name of one of our generator
# functions. When we encounter this, we call the function
# and get its results, placing them in the globals for the
# the call to eval.
#
args['mode'] = self._mode
if not args.get('formatType', None):
if args.get('alreadyFocused', False):
args['formatType'] = 'focused'
else:
args['formatType'] = 'unfocused'
formatting = self._script.formatting.getFormat(**args)
# Add in the context if this is the first time
# we've been called.
#
if not args.get('recursing', False):
if args.get('includeContext', True):
prefix = self._script.formatting.getPrefix(**args)
suffix = self._script.formatting.getSuffix(**args)
formatting = '%s + %s + %s' % (prefix, formatting, suffix)
args['recursing'] = True
firstTimeCalled = True
else:
firstTimeCalled = False
msg = '%s GENERATOR: Starting generation for %s' % (self._mode.upper(), obj)
debug.println(debug.LEVEL_INFO, msg, True)
assert(formatting)
while True:
currentTime = time.time()
try:
result = eval(formatting, globalsDict)
break
except NameError:
result = []
info = _formatExceptionInfo()
arg = info[1][0]
arg = arg.replace("name '", "")
arg = arg.replace("' is not defined", "")
if arg not in self._methodsDict:
debug.printException(debug.LEVEL_SEVERE)
break
globalsDict[arg] = self._methodsDict[arg](obj, **args)
duration = "%.4f" % (time.time() - currentTime)
debug.println(debug.LEVEL_ALL,
" GENERATION TIME: %s ----> %s=%s" \
% (duration, arg, repr(globalsDict[arg])))
except:
debug.printException(debug.LEVEL_SEVERE)
result = []
duration = "%.4f" % (time.time() - startTime)
debug.println(debug.LEVEL_ALL, " COMPLETION TIME: %s" % duration)
debug.println(debug.LEVEL_ALL, "%s GENERATOR: Results:" % self._mode.upper(), True)
for element in result:
debug.println(debug.LEVEL_ALL, " %s" % element)
return result
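    # Illustrative call (sketch; 'obj' is a live accessible object):
    #     strings = generator.generate(obj, formatType='unfocused')
    # might yield e.g. ['OK', 'push button'] depending on formatting.py.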
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings, with
the exception that the pyatspi.ROLE_UNKNOWN role will yield an
empty array. Note that a 'role' attribute in args will
override the accessible role of the obj.
"""
# Subclasses must override this.
return []
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found. [[[WDW - I wonder if we should just
have _generateName, _generateDescription,
_generateDisplayedText, etc., that don't do any fallback.
Then, we can allow the formatting to do the fallback (e.g.,
'displayedText or name or description'). [[[JD to WDW - I
        needed a _generateDescription for whereAmI. :-) See below.]]]
"""
result = []
name = self._script.utilities.displayedText(obj)
if obj.getRole() == pyatspi.ROLE_COMBO_BOX:
children = self._script.utilities.selectedChildren(obj)
if not children and obj.childCount:
children = self._script.utilities.selectedChildren(obj[0])
children = children or [child for child in obj]
names = map(self._script.utilities.displayedText, children)
names = list(filter(lambda x: x, names))
if len(names) == 1:
name = names[0].strip()
elif len(children) == 1 and children[0].name:
name = children[0].name.strip()
elif not names and obj.name:
name = obj.name
if name:
result.append(name)
else:
try:
description = obj.description
except (LookupError, RuntimeError):
return result
if description:
result.append(description)
else:
link = None
if obj.getRole() == pyatspi.ROLE_LINK:
link = obj
elif obj.parent and obj.parent.getRole() == pyatspi.ROLE_LINK:
link = obj.parent
if link:
basename = self._script.utilities.linkBasename(link)
if basename:
result.append(basename)
# To make the unlabeled icons in gnome-panel more accessible.
try:
role = args.get('role', obj.getRole())
except (LookupError, RuntimeError):
return result
if not result and obj.getRole() == pyatspi.ROLE_ICON \
and obj.parent.getRole() == pyatspi.ROLE_PANEL:
return self._generateName(obj.parent)
return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
result = [x for x in obj.getAttributes() if x.startswith('placeholder-text:')]
return [x.replace('placeholder-text:', '') for x in result]
def _generateLabelAndName(self, obj, **args):
"""Returns the label and the name as an array of strings for speech
and braille. The name will only be present if the name is
different from the label.
"""
result = []
label = self._generateLabel(obj, **args)
name = self._generateName(obj, **args)
result.extend(label)
if not len(label):
result.extend(name)
elif len(name) and name[0].strip() != label[0].strip():
result.extend(name)
return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
result = []
if obj.description:
label = self._script.utilities.displayedLabel(obj) or ""
name = obj.name or ""
desc = obj.description.lower()
if not (desc in name.lower() or desc in label.lower()):
result.append(obj.description)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
result = []
label = self._script.utilities.displayedLabel(obj)
if label:
result.append(label)
return result
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImageDescription(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represent the description of the image on the object, if it
exists. Otherwise, an empty array is returned.
"""
result = []
try:
image = obj.queryImage()
except NotImplementedError:
pass
else:
description = image.imageDescription
if description and len(description):
result.append(description)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateClickable(self, obj, **args):
return []
def _generateHasLongDesc(self, obj, **args):
return []
def _generateAvailability(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the grayed/sensitivity/availability state of the
object, but only if it is insensitive (i.e., grayed out and
        inactive). Otherwise, an empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'insensitive'
if not obj.getState().contains(pyatspi.STATE_SENSITIVE):
result.append(self._script.formatting.getString(**args))
return result
def _generateRequired(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the required state of the object, but only if it is
required (i.e., it is in a dialog requesting input and the
        user must give it a value). Otherwise, an empty array will
be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'required'
if obj.getState().contains(pyatspi.STATE_REQUIRED) \
or (obj.getRole() == pyatspi.ROLE_RADIO_BUTTON \
and obj.parent.getState().contains(pyatspi.STATE_REQUIRED)):
result.append(self._script.formatting.getString(**args))
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'readonly'
if self._script.utilities.isReadOnlyTextArea(obj):
result.append(self._script.formatting.getString(**args))
return result
def _generateCellCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes that are in a table. An empty array will be
returned if this is not a checkable cell.
"""
result = []
if self._script.utilities.hasMeaningfulToggleAction(obj):
oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX, args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
elif state.contains(pyatspi.STATE_INDETERMINATE):
result.append(indicators[2])
else:
result.append(indicators[0])
return result
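    # Note (assumption based on the indexing above): the 'checkbox' indicator
    # list is expected to hold three strings -- not checked, checked, and
    # partially checked -- consumed as indices 0/1/2.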
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'radiobutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateChildWidget(self, obj, **args):
widgetRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_TOGGLE_BUTTON]
isWidget = lambda x: x and x.getRole() in widgetRoles
# For GtkListBox, such as those found in the control center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
widget = pyatspi.findDescendant(obj, isWidget)
if widget:
return self.generate(widget, includeContext=False)
return []
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'togglebutton'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_CHECKED) \
or state.contains(pyatspi.STATE_PRESSED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
def _generateMenuItemCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the menu item, only if it is
        checked. Otherwise, an empty array will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'checkbox'
indicators = self._script.formatting.getString(**args)
if obj.getState().contains(pyatspi.STATE_CHECKED):
result.append(indicators[1])
return result
def _generateExpandableState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the expanded/collapsed state of an object, such as a
tree node. If the object is not expandable, an empty array
will be returned.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'expansion'
indicators = self._script.formatting.getString(**args)
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
result.append(indicators[1])
else:
result.append(indicators[0])
return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
def _generateRowHeader(self, obj, **args):
"""Returns an array of strings to be used in speech and braille that
represent the row header for an object that is in a table, if
it exists. Otherwise, an empty array is returned.
"""
result = []
header = self._script.utilities.rowHeaderForCell(obj)
if not header:
return result
text = self._script.utilities.displayedText(header)
if not text:
return result
roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_ROW_HEADER)
if args.get('mode') == 'speech':
if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:
text = "%s %s" % (text, roleString)
elif args.get('mode') == 'braille':
text = "%s %s" % (text, roleString)
result.append(text)
return result
def _generateColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists. Otherwise, an empty array
is returned.
"""
result = []
header = self._script.utilities.columnHeaderForCell(obj)
if not header:
return result
text = self._script.utilities.displayedText(header)
if not text:
return result
roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_COLUMN_HEADER)
if args.get('mode') == 'speech':
if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:
text = "%s %s" % (text, roleString)
elif args.get('mode') == 'braille':
text = "%s %s" % (text, roleString)
result.append(text)
return result
def _generateTableCell2ChildLabel(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
label of a toggle in a table cell that has a special 2 child
pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
if self._script.utilities.hasMeaningfulToggleAction(child):
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if not hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateTableCell2ChildToggle(self, obj, **args):
"""Returns an array of strings for use by speech and braille for the
toggle value of a toggle in a table cell that has a special 2
child pattern that we run into. Otherwise, an empty array is
returned.
"""
result = []
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [False, False]
for i, child in enumerate(obj):
if self._script.utilities.hasMeaningfulToggleAction(child):
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
if hasToggle[i]:
result.extend(self.generate(obj[i], **args))
return result
def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):
"""If this table cell has a "toggle" action, and doesn't have any
label associated with it then also speak the table column
header. See Orca bug #455230 for more details.
"""
# If we're reading just a single cell in speech, the new
# header portion is going to give us this information.
#
if args['mode'] == 'speech' and not args.get('readingRow', False):
return []
result = []
descendant = self._script.utilities.realActiveDescendant(obj)
label = self._script.utilities.displayedText(descendant)
if not label and self._script.utilities.hasMeaningfulToggleAction(obj):
            accHeader = self._script.utilities.columnHeaderForCell(obj)
            if accHeader:
                result.append(accHeader.name)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to
complexity in the code. This method is used to return an
array of strings for use by speech and braille for a single
table cell itself. The string, 'blank', is added for empty
cells.
"""
result = []
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateTable(self, obj, **args):
"""Returns an array of strings for use by speech and braille to present
the size of a table."""
if self._script.utilities.isLayoutOnly(obj):
return []
try:
table = obj.queryTable()
except:
return []
return [messages.tableSize(table.nRows, table.nColumns)]
def _generateTableCellRow(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to complexity in
the code. This method is used to return an array of strings
(and possibly voice and audio specifications) for an entire row
in a table if that's what the user has requested and if the row
has changed. Otherwise, it will return an array for just the
current cell.
"""
result = []
try:
parentTable = obj.parent.queryTable()
except:
parentTable = None
isDetailedWhereAmI = args.get('formatType', None) == 'detailedWhereAmI'
readFullRow = self._script.utilities.shouldReadFullRow(obj)
if (readFullRow or isDetailedWhereAmI) and parentTable \
and (not self._script.utilities.isLayoutOnly(obj.parent)):
parent = obj.parent
index = self._script.utilities.cellIndex(obj)
row = parentTable.getRowAtIndex(index)
column = parentTable.getColumnAtIndex(index)
# This is an indication of whether we should speak all the
# table cells (the user has moved focus up or down a row),
# or just the current one (focus has moved left or right in
# the same row).
#
presentAll = True
if isDetailedWhereAmI:
if parentTable.nColumns <= 1:
return result
elif "lastRow" in self._script.pointOfReference \
and "lastColumn" in self._script.pointOfReference:
pointOfReference = self._script.pointOfReference
presentAll = \
(self._mode == 'braille') \
or \
((pointOfReference["lastRow"] != row) \
or ((row == 0 or row == parentTable.nRows-1) \
and pointOfReference["lastColumn"] == column))
if presentAll:
args['readingRow'] = True
if self._script.utilities.isTableRow(obj):
cells = [x for x in obj]
else:
cells = [parentTable.getAccessibleAt(row, i) \
for i in range(parentTable.nColumns)]
for cell in cells:
if not cell:
continue
state = cell.getState()
showing = state.contains(pyatspi.STATE_SHOWING)
if showing:
cellResult = self._generateRealTableCell(cell, **args)
if cellResult and result and self._mode == 'braille':
result.append(braille.Region(
object_properties.TABLE_CELL_DELIMITER_BRAILLE))
result.extend(cellResult)
else:
result.extend(self._generateRealTableCell(obj, **args))
else:
result.extend(self._generateRealTableCell(obj, **args))
return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateExpandedEOCs(self, obj, **args):
"""Returns the expanded embedded object characters for an object."""
return []
def _generateSubstring(self, obj, **args):
start = args.get('startOffset')
end = args.get('endOffset')
if start is None or end is None:
return []
substring = self._script.utilities.substring(obj, start, end)
if substring and not self._script.EMBEDDED_OBJECT_CHARACTER in substring:
return [substring]
return []
def _generateStartOffset(self, obj, **args):
return args.get('startOffset')
def _generateEndOffset(self, obj, **args):
return args.get('endOffset')
def _generateCurrentLineText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
result = self._generateSubstring(obj, **args)
if result:
return result
[text, caretOffset, startOffset] = self._script.getTextLineAtCaret(obj)
if text and not self._script.EMBEDDED_OBJECT_CHARACTER in text:
return [text]
return []
def _generateDisplayedText(self, obj, **args ):
"""Returns an array of strings for use by speech and braille that
represents all the text being displayed by the object.
"""
result = self._generateSubstring(obj, **args)
if result:
return result
displayedText = self._script.utilities.displayedText(obj)
if not displayedText:
return []
return [displayedText]
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNodeLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the tree node level of the object, or an empty
array if the object is not a tree node.
"""
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nodelevel'
level = self._script.utilities.nodeLevel(obj)
if level >= 0:
result.append(self._script.formatting.getString(**args)\
% (level + 1))
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
def _generateValue(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the value of the object. This is typically the
numerical value, but may also be the text of the 'value'
attribute if it exists on the object. [[[WDW - we should
        consider returning an empty array if there is no value.]]]
"""
return [self._script.utilities.textForValue(obj)]
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateApplicationName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
        represents the name of the application for the object.
"""
result = []
try:
result.append(obj.getApplication().name)
except:
pass
return result
def _generateNestingLevel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the nesting level of an object in a list.
"""
start = args.get('startOffset')
end = args.get('endOffset')
if start is not None and end is not None:
return []
result = []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'nestinglevel'
nestingLevel = self._script.utilities.nestingLevel(obj)
if nestingLevel:
result.append(self._script.formatting.getString(**args)\
% nestingLevel)
return result
def _generateRadioButtonGroup(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the radio button group label for the object, or an
empty array if the object has no such label.
"""
result = []
try:
role = obj.getRole()
except:
role = None
if role == pyatspi.ROLE_RADIO_BUTTON:
radioGroupLabel = None
relations = obj.getRelationSet()
for relation in relations:
if (not radioGroupLabel) \
and (relation.getRelationType() \
== pyatspi.RELATION_LABELLED_BY):
radioGroupLabel = relation.getTarget(0)
break
if radioGroupLabel:
result.append(self._script.utilities.\
displayedText(radioGroupLabel))
else:
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() in [pyatspi.ROLE_PANEL,
pyatspi.ROLE_FILLER]:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
def _generateRealActiveDescendantDisplayedText(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the text actually being painted in the cell, if it can be
found. Otherwise, an empty array is returned.
"""
result = []
rad = self._script.utilities.realActiveDescendant(obj)
return self._generateDisplayedText(rad, **args)
def _generateRealActiveDescendantRoleName(self, obj, **args ):
"""Objects, such as tables and trees, can represent individual cells
via a complicated nested hierarchy. This method returns an
array of strings for use by speech and braille that represents
the role of the object actually being painted in the cell.
"""
rad = self._script.utilities.realActiveDescendant(obj)
args['role'] = rad.getRole()
return self._generateRoleName(rad, **args)
def _generateNamedContainingPanel(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represents the nearest ancestor of an object which is a named panel.
"""
result = []
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_PANEL:
label = self._generateLabelAndName(parent)
if label:
result.extend(label)
break
parent = parent.parent
return result
def _generatePageSummary(self, obj, **args):
return []
def _generateProgressBarIndex(self, obj, **args):
return []
def _generateProgressBarValue(self, obj, **args):
return []
| lgpl-2.1 | -4,279,859,017,480,769,000 | 40.78785 | 176 | 0.542461 | false |
peterheim1/robbie | bin/speech_text.py | 1 | 2005 | #!/usr/bin/env python
# Author: Derek Green
PKG = 'pocketsphinx'
#import roslib; roslib.load_manifest(PKG)
import rospy
import re
import os
from std_msgs.msg import String
from subprocess import Popen, PIPE
class SpeechText():
def __init__(self):
self.pub = rospy.Publisher('speech_text', String)
rospy.init_node('speech_text_node', anonymous=True)
path = rospy.get_param("/speech_text/lm_path")
# the language model directory found at path should have a .dic and a .lm, grab them:
lm = None
dic = None
for filename in os.listdir(path):
if re.match(".*\.dic", filename):
dic = filename
if re.match(".*\.lm", filename):
lm = filename
if lm and dic:
args = ["pocketsphinx_continuous", "-hmm", "/usr/share/pocketsphinx/model/hmm/wsj1", "-lm", path + lm, "-dict", path + dic]
self.ps = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
rospy.on_shutdown( self.clean_up )
else:
print "ERROR: pocketsphinx is missing language model file. dic = " + dic + ", lm = " + lm
def speech_to_text(self):
print "ENTERING SPEECH_TEXT"
while not rospy.is_shutdown():
line = self.ps.stdout.readline()
if re.match("READY.*",line):
print "======= pocket sphinx is ready ======="
heard = re.match("\d{9}[:](.*)( [(]-\d*[)])",line)
if heard:
out = heard.group(1).lower().strip()
print "JUST HEARD: \"" + out + "\""
rospy.loginfo(out)
self.pub.publish(out)
def clean_up(self):
print "=============================== speech_txt is shutting down. Killing pocketsphinx process #", self.ps.pid
self.ps.kill()
if __name__ == '__main__':
try:
st = SpeechText()
st.speech_to_text()
rospy.spin()
except rospy.ROSInterruptException:
pass
| gpl-3.0 | 1,194,413,395,588,688,100 | 32.983051 | 135 | 0.54015 | false |
noppanit/sweepy | sweepy.py | 1 | 3267 | #!/usr/bin/env python
import pymongo
import tweepy
from pymongo import MongoClient
from sweepy.get_config import get_config
config = get_config()
consumer_key = config.get('PROCESS_TWITTER_CONSUMER_KEY')
consumer_secret = config.get('PROCESS_TWITTER_CONSUMER_SECRET')
access_token = config.get('PROCESS_TWITTER_ACCESS_TOKEN')
access_token_secret = config.get('PROCESS_TWITTER_ACCESS_TOKEN_SECRET')
MONGO_URL = config.get('MONGO_URL')
MONGO_PORT = config.get('MONGO_PORT')
MONGO_USERNAME = config.get('MONGO_USERNAME')
MONGO_PASSWORD = config.get('MONGO_PASSWORD')
MONGO_DATABASE = config.get('MONGO_DATABASE')
client = MongoClient(MONGO_URL, int(MONGO_PORT))
print 'Establishing Tweepy connection'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3)
db = client[MONGO_DATABASE]
db.authenticate(MONGO_USERNAME, MONGO_PASSWORD)
raw_tweets = db.raw_tweets
users = db.users
def is_user_in_db(screen_name):
return get_user_from_db(screen_name) is None
def get_user_from_db(screen_name):
return users.find_one({'screen_name' : screen_name})
def get_user_from_twitter(user_id):
return api.get_user(user_id)
def get_followers(screen_name):
users = []
for i, page in enumerate(tweepy.Cursor(api.followers, id=screen_name, count=200).pages()):
print 'Getting page {} for followers'.format(i)
users += page
return users
def get_friends(screen_name):
users = []
for i, page in enumerate(tweepy.Cursor(api.friends, id=screen_name, count=200).pages()):
print 'Getting page {} for friends'.format(i)
users += page
return users
def get_followers_ids(screen_name):
ids = []
try:
for i, page in enumerate(tweepy.Cursor(api.followers_ids, id=screen_name, count=5000).pages()):
print 'Getting page {} for followers ids'.format(i)
ids += page
except tweepy.error.TweepError as e:
print e.message
return ids
def get_friends_ids(screen_name):
ids = []
try:
for i, page in enumerate(tweepy.Cursor(api.friends_ids, id=screen_name, count=5000).pages()):
print 'Getting page {} for friends ids'.format(i)
ids += page
except tweepy.error.TweepError as e:
print e.message
return ids
def process_user(user):
screen_name = user['screen_name']
print 'Processing user : {}'.format(screen_name)
if is_user_in_db(screen_name):
user['followers_ids'] = get_followers_ids(screen_name)
user['friends_ids'] = get_friends_ids(screen_name)
users.insert_one(user)
else:
print '{} exists!'.format(screen_name)
print 'End processing user : {}'.format(screen_name)
if __name__ == "__main__":
for doc in raw_tweets.find({'processed' : {'$exists': False}}):
print 'Start processing'
try:
process_user(doc['user'])
except KeyError:
pass
try:
process_user(doc['retweeted_status']['user'])
except KeyError:
pass
raw_tweets.update_one({'_id': doc['_id']}, {'$set':{'processed':True}})
| mit | -7,822,002,205,192,745,000 | 28.7 | 103 | 0.654729 | false |
kaajavi/ninformes | escolar/test_dev.py | 1 | 3482 | from django.shortcuts import render_to_response
from django.template import RequestContext
import os
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from xhtml2pdf import pisa #INSTALAR ESTA LIBRERIA
from django.templatetags.static import static
from django.http import HttpResponseRedirect, HttpResponse
from escolar.models import Docente, Curso, Alumno, MatriculaAlumnado, Campo, MatriculaDocentes, SITUACION_DOCENTE, TIPO_MATRICULA_DOCENTE, ItemCampo, DescripcionCampo
FILE_LIST = settings.BASE_DIR+'/test.pdf'
# Convert HTML URIs to absolute system paths so xhtml2pdf can access those resources
def link_callback(uri, rel):
# use short variable names
sUrl = settings.STATIC_URL # Typically /static/
sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/
mUrl = settings.MEDIA_URL # Typically /static/media/
mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/BASE_DIR
# convert URIs to absolute system paths
if uri.startswith(mUrl):
path = os.path.join(mRoot, uri.replace(mUrl, ""))
elif uri.startswith(sUrl):
path = os.path.join(sRoot, uri.replace(sUrl, ""))
# make sure that file exists
if not os.path.isfile(path):
raise Exception(
'media URI must start with %s or %s' % \
(sUrl, mUrl))
return path
def view_create_principal(request):
context = RequestContext(request)
return render_to_response('testing/test_create_principal.html', {},context)
###para cfk
from django import forms
from ckeditor.widgets import CKEditorWidget
class ExampleCFKForm(forms.Form):
content = forms.CharField(widget=CKEditorWidget())
def test_cfkeditor(request):
context = RequestContext(request)
form = ExampleCFKForm()
return render_to_response('testing/test_cfkeditor.html', {'form':form},context)
def test_generar_informe_matricula(request):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="informe_test.pdf"'
from escolar.default_data.images_base64 import LOGO_PROVINCIAL
# Prepare context
matricula = MatriculaAlumnado.objects.get(pk=112)
descrCampo = DescripcionCampo.objects.filter(matricula_alumno=matricula, semestre=1, campo__especial=False)
descrCampoInstitucionales = DescripcionCampo.objects.filter(matricula_alumno=matricula, semestre=1, campo__especial=True)
data = {'etapa':1,
'matricula':matricula,
'descrCampo':descrCampo,
'descrCampoInstitucionales':descrCampoInstitucionales,
'logo_provincial':LOGO_PROVINCIAL
}
# Render html content through html template with context
template = get_template('informe/_informe.html')
html = template.render(Context(data))
# Write PDF to file
file = open(FILE_LIST, "w+b")
pisaStatus = pisa.CreatePDF(html, dest=file,
link_callback = link_callback)
# Return PDF document through a Django HTTP response
file.seek(0)
pdf = file.read()
file.close()
response.write(pdf)
# Don't forget to close the file handle
#BORRO EL ARCHIVO
if os.path.exists(FILE_LIST):
try:
os.remove(FILE_LIST)
except OSError, e:
pass
return response | gpl-2.0 | 624,910,965,018,021,500 | 34.907216 | 166 | 0.688685 | false |
cloudnull/genastack_roles | genastack_roles/nova_api_os_compute/__init__.py | 1 | 1103 | # =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
BUILD_DATA = {
'nova_api_os_compute': {
'help': 'Install nova OS Compute API from upstream',
'required': [
'nova'
],
'init_script': [
{
'help': 'Start and stop nova on boot',
'init_path': '/etc/init.d',
'name': 'nova',
'chuid': 'nova',
'chdir': '/var/lib/nova',
'options': '--'
' --config-file=/etc/nova/nova.conf',
'program': 'nova-api-os-compute'
}
]
}
}
| gpl-3.0 | -5,563,318,845,062,327,000 | 35.766667 | 79 | 0.446056 | false |
svenfraeys/sftoolbox | sftoolboxmaya/widgets.py | 1 | 1480 | import sftoolboxmaya.utils
from sftoolboxqt import qtgui
from sftoolboxqt.widgets import ProjectWidget
class MayaProjectWidget(sftoolboxmaya.utils.DialogWidget):
"""toolbox widget
"""
def _wrapped_set_window_title(self, func):
"""wrap for the set window title to keep it synced
"""
def wrapped_func(text):
self.setWindowTitle(text)
func(text)
return wrapped_func
def __init__(self, project=None, parent=None):
"""settings and context are given for the init
"""
super(MayaProjectWidget, self).__init__()
layout = qtgui.QVBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self._toolbox_widget = ProjectWidget(project)
layout.addWidget(self._toolbox_widget)
self.setWindowTitle(self._toolbox_widget.windowTitle())
# wrap the set window title so we keep it in sync
self._toolbox_widget.setWindowTitle = self._wrapped_set_window_title(
self._toolbox_widget.setWindowTitle)
@property
def project(self):
return self._toolbox_widget.project
@project.setter
def project(self, value):
self._toolbox_widget.project = value
@property
def active_panel(self):
return self._toolbox_widget.active_panel
@active_panel.setter
def active_panel(self, value):
self._toolbox_widget.active_panel = value
| mit | 6,410,005,747,240,740,000 | 28.6 | 77 | 0.648649 | false |
RyanofRIT/DjangoWebsite | HealthNet/HealthNet/urls.py | 1 | 2087 | from django.conf.urls import include, url, patterns
from django.contrib import admin
from HealthApp import views
"""
The urlpatterns is how we map the site urls to specific views in the views.py. The first part is
a regular expression to describe the url pattern, followed by the view that should be called.
Lastly, a name is given to each pattern so that they can be referenced from elsewhere in the code.
For example, when an HTTPResponseRedirect(reverse('login')) is returned in one of the views, it
is doing a reverse lookup of the url pattern named 'login' and returning the view (and subsequently
the html page) associated with the view.
There are a couple patterns that are a bit unique. The first is the url for the admin page which
links to the built in url network already created by django. The other unique urls are the ones
that deal with patient information since the urls are specific to the patient, and the username in
the url needs to be passed into the view as a parameter. The format of (?P<username>\w+) is used
to first identify that information is being captured, and to identify what parameter it is being passed
in as (in this case, the username parameter).
Note: the first url is used to redirect users to the login page when at the 'root' url of the site.
"""
urlpatterns = [
url(r'^$', views.userLogin, name='login'),
url(r'^login/$', views.userLogin, name='login'),
url(r'^register/$', views.register, name='register'),
url(r'^(?P<username>\w+)/profile/$', views.profile, name='profile'),
url(r'^(?P<username>\w+)/staffProfile/$', views.staffProfile, name='staffProfile'),
url(r'^(?P<username>\w+)/staffProfile/(?P<patient>\w+)$', views.updateUser, name='updateUser'),
url(r'^logout/$', views.userLogout, name='logout'),
url(r'^admin/', include(admin.site.urls)),
url(r'^profileEdit/$', views.profileEdit, name='profileEdit'),
url(r'^createAppForm/', views.createApp, name='createAppForm'),
url(r'^deleteAppForm/(\d+)$', views.deleteApp, name='deleteAppForm'),
url(r'^export/$', views.export, name='export')
] | gpl-2.0 | 9,009,979,145,393,319,000 | 62.272727 | 103 | 0.729756 | false |
LEAMgroup/leam.stress | leam/stress/interfaces/stressanalysis.py | 1 | 1042 | from zope.interface import Interface
# -*- Additional Imports Here -*-
from zope import schema
from leam.stress import stressMessageFactory as _
class IStressAnalysis(Interface):
"""Frontend to the LEAM Stress Analysis Model"""
# -*- schema definition goes here -*-
layer = schema.Object(
title=_(u"GIS Layer"),
required=True,
description=_(u"A GIS layer with the environmentally sensitive areas."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
scenario = schema.Object(
title=_(u"LUC Scenario"),
required=True,
description=_(u"An existing LUC Scenario with it's associated probability maps."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
section = schema.Object(
title=_(u"Section Map"),
required=False,
description=_(u"Section layer used to split the sensative layer."),
schema=Interface, # specify the interface(s) of the addable types here
)
#
| gpl-2.0 | -7,602,434,538,923,923,000 | 30.575758 | 90 | 0.658349 | false |
dereulenspiegel/spotimc | resources/libs/spotimcgui/views/__init__.py | 1 | 7854 | '''
Copyright 2011 Mikel Azkolain
This file is part of Spotimc.
Spotimc is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Spotimc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Spotimc. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmc
import xbmcgui
import weakref
from inspect import isfunction
def iif(cond, on_true, on_false):
if cond:
if not isfunction(on_true):
return on_true
else:
return on_true()
else:
if not isfunction(on_false):
return on_false
else:
return on_false()
class ViewManager:
__window = None
__view_list = None
__position = None
__vars = None
def __init__(self, window):
self.__window = weakref.proxy(window)
self.__view_list = []
self.__position = -1
self.__vars = {}
def num_views(self):
return len(self.__view_list)
def position(self):
return self.__position
def has_next(self):
return(
self.num_views() > 0
and self.position() < self.num_views() - 1
)
def _show_view(self, view):
view.show(self)
container_id = view.get_container_id()
if container_id is not None:
xbmc.executebuiltin("Control.SetFocus(%d)" % container_id)
def next(self):
#Fail if no next window
if not self.has_next():
raise IndexError("No more views available")
#If there's one active
if self.__position != -1:
self.__view_list[self.__position].hide(self)
#Show the next one
self.__position += 1
self._show_view(self.__view_list[self.__position])
def has_previous(self):
return self.__position > 0
def previous(self):
#Fail if no previous window
if not self.has_previous():
raise IndexError("No previous views available")
#Hide current
self.__view_list[self.__position].hide(self)
#Show previous
self.__position -= 1
self._show_view(self.__view_list[self.__position])
def add_view(self, view):
#Remove all views that come next (if any)
del self.__view_list[self.__position+1:]
#Add the new one
self.__view_list.append(view)
#Go to the next view
self.next()
def click(self, control_id):
self.__view_list[self.__position].click(self, control_id)
def show(self, give_focus=True):
self.__view_list[self.__position].show(self, give_focus)
def clear_views(self):
#Check at least if a view is visible
if self.__position != -1:
#Hide current
self.__view_list[self.__position].hide(self)
#Delete all views
self.__view_list = []
#And reset the position counter
self.__position = -1
def set_var(self, name, value):
self.__vars[name] = value
def get_var(self, name):
return self.__vars[name]
def get_window(self):
return self.__window
class BaseView:
__is_visible = None
def __init__(self):
self.__is_visible = False
def is_visible(self):
return self.__is_visible
def click(self, view_manager, control_id):
pass
def show(self, view_manager, give_focus=True):
self.__is_visible = True
def hide(self, view_manager):
self.__is_visible = False
def back(self, view_manager):
pass
def get_container_id(self):
pass
class BaseContainerView(BaseView):
def render(self, view_manager):
"""Tell the view to render it's content.
The view should return True if the content was rendered successfully,
and False if data was not still available.
"""
raise NotImplementedError()
def get_container(self, view_manager):
raise NotImplementedError()
def show(self, view_manager, give_focus=True):
BaseView.show(self, view_manager, give_focus)
#Hide container and show loading anim.
self.get_container(view_manager).setVisibleCondition('false')
view_manager.get_window().show_loading()
if self.render(view_manager):
#Hide loading and show container
view_manager.get_window().hide_loading()
self.get_container(view_manager).setVisibleCondition('true')
#And give focus if asked to do so
if give_focus:
view_manager.get_window().setFocus(
self.get_container(view_manager)
)
def hide(self, view_manager):
BaseView.hide(self, view_manager)
#Just hide the container
self.get_container(view_manager).setVisibleCondition('false')
class BaseListContainerView(BaseContainerView):
__list_position = None
def get_list(self, view_manager):
raise NotImplementedError()
def show(self, view_manager, give_focus=True):
BaseView.show(self, view_manager, give_focus)
window = view_manager.get_window()
#Hide container and show loading anim.
self.get_container(view_manager).setVisibleCondition('false')
window.show_loading()
if self.render(view_manager):
#If we have a stored list position
if self.__list_position is not None:
self.get_list(view_manager).selectItem(self.__list_position)
#Not list position? Set it on the start
else:
self.get_list(view_manager).selectItem(0)
#List was rendered but with no items, add a placeholder
if self.get_list(view_manager).size() == 0:
window.setProperty('ListWithNoItems', 'true')
item = xbmcgui.ListItem()
item.setProperty('NoItems', 'true')
self.get_list(view_manager).addItem(item)
else:
window.setProperty('ListWithNoItems', 'false')
#Hide loading and show container
window.hide_loading()
self.get_container(view_manager).setVisibleCondition('true')
#And give focus if asked to do so
if give_focus:
view_manager.get_window().setFocus(
self.get_container(view_manager)
)
def hide(self, view_manager):
BaseView.hide(self, view_manager)
#Keep the list position
list_obj = self.get_list(view_manager)
self.__list_position = list_obj.getSelectedPosition()
#And call the container stuff
BaseContainerView.hide(self, view_manager)
| gpl-3.0 | 4,706,419,649,641,739,000 | 25.950178 | 77 | 0.540998 | false |
meeza/PythonWork | SearchEngines/crawler.py | 1 | 1625 | import urllib
def get_page(url):
try:
return urllib.urlopen(url).read()
except:
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
def crawl_web(seed): # returns index, graph of inlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
| mit | -1,297,772,012,444,380,000 | 22.214286 | 54 | 0.548308 | false |
xthirtyfive/gamemod | log.py | 1 | 1077 | # Copyright 2013 X35
#
# This file is part of gamemod.
#
# gamemod is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gamemod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gamemod. If not, see <http:#www.gnu.org/licenses/>.
import config
class log:
f = None
@staticmethod
def out(msg):
if config.LOG_PRINT: print(msg)
if not log.f: log.openfile()
if log.f:
log.f.write(msg+"\n")
log.f.flush()
@staticmethod
def openfile():
if not config.LOG_FILE: return
log.f = open(config.LOG_FILE, "a")
@staticmethod
def closefile():
if not log.f: return
log.f.close()
| gpl-3.0 | 8,196,184,236,019,420,000 | 26.615385 | 73 | 0.694522 | false |
jwhitlock/kuma | kuma/users/signal_handlers.py | 1 | 4279 | from allauth.account.signals import email_confirmed, user_signed_up
from allauth.socialaccount.signals import social_account_removed
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from waffle import switch_is_active
from kuma.core.urlresolvers import reverse
from kuma.wiki.jobs import DocumentContributorsJob
from .jobs import UserGravatarURLJob
from .models import User, UserBan
from .tasks import send_welcome_email
@receiver(post_save, sender=User, dispatch_uid='users.user.post_save')
def on_user_save(sender, instance, created, **kwargs):
"""
A signal handler to be called after saving a user.
Invalidates the cache for the given user's gravatar URL.
"""
job = UserGravatarURLJob()
if instance.email:
handler = job.invalidate
elif instance.email is None:
handler = job.delete
else:
return
# do the heavy-lifting for all avatar sizes
for size in settings.AVATAR_SIZES:
handler(instance.email, size=size)
@receiver(user_signed_up, dispatch_uid='users.user_signed_up')
def on_user_signed_up(sender, request, user, **kwargs):
"""
Signal handler to be called when a given user has signed up.
"""
url = reverse('wiki.document', args=['MDN/Getting_started'])
msg = _('You have completed the first step of '
'<a href="%s">getting started with MDN</a>') % url
messages.success(request, msg)
if switch_is_active('welcome_email'):
# only send if the user has already verified
# at least one email address
if user.emailaddress_set.filter(verified=True).exists():
send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
@receiver(email_confirmed, dispatch_uid='users.email_confirmed')
def on_email_confirmed(sender, request, email_address, **kwargs):
"""
Signal handler to be called when a given email address was confirmed
by a user.
"""
if switch_is_active('welcome_email'):
# only send if the user has exactly one verified (the given)
# email address, in other words if it was just confirmed
user = email_address.user
previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)
if not previous_emails.exists():
send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
@receiver(social_account_removed, dispatch_uid='users.social_account_removed')
def on_social_account_removed(sender, request, socialaccount, **kwargs):
"""
Invoked just after a user successfully removed a social account
We use it to reset the name of the socialaccount provider in
the user's session to one that he also has.
"""
user = socialaccount.user
try:
all_socialaccounts = user.socialaccount_set.all()
next_socialaccount = all_socialaccounts[0]
request.session['sociallogin_provider'] = next_socialaccount.provider
request.session.modified = True
except (ObjectDoesNotExist, IndexError):
pass
@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')
def on_ban_save(sender, instance, **kwargs):
"""
Signal handler to be called when a given user ban is saved.
"""
user = instance.user
user.is_active = not instance.is_active
user.save()
invalidate_document_contribution(user)
@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')
def on_ban_delete(sender, instance, **kwargs):
"""
Signal handler to be called when a user ban is deleted.
"""
user = instance.user
user.is_active = True
user.save()
invalidate_document_contribution(user)
def invalidate_document_contribution(user):
"""
Invalidate the contributor list for Documents the user has edited.
This will remove them if they have been banned, and add them if they
have been unbanned.
"""
revisions = user.created_revisions
doc_ids = set(revisions.values_list('document_id', flat=True))
job = DocumentContributorsJob()
for doc_id in doc_ids:
job.invalidate(doc_id)
| mpl-2.0 | -3,534,320,353,739,426,300 | 34.658333 | 78 | 0.703903 | false |
Samcbehrens/VisualAnalytics | dataProcessing/USTimeline.py | 1 | 2413 | import csv
import datetime
import json
import calendar
from timeline3 import convertToFile
def convertTime(dateAsString):
MillisecNum=''
conv =''
if len(dateAsString)>4:
conv = datetime.datetime.strptime(dateAsString, '%m/%d/%Y')
MillisecNum = calendar.timegm(conv.timetuple())
else:
numberAsInt = int(dateAsString)
d = datetime.datetime(numberAsInt,1,1)
MillisecNum = calendar.timegm(d.timetuple())
MillisecNum = MillisecNum *1000
return MillisecNum
def readCsv():
allInformation = []
with open('usTimeline.csv', 'rb') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(), delimiters=',')
csvfile.seek(0)
reader=csv.reader(csvfile,dialect)
for line in reader:
print line
allInformation.append(line)
return allInformation
def reformat(allInformation):
newFormation =[]
for i in range(0, len(allInformation)):
## get index out of range if you dont check this first
if i+1 < len(allInformation)-1:
##look ahead to see if the next one doesnt have a date
if allInformation[i+1][0]=='':
allInformation[i+1][0]=allInformation[i][0]
#add if it has the correct date
thisPotYear = allInformation[i][0]
if thisPotYear.isdigit():
newFormation.append(allInformation[i])
return newFormation
def webToJson(soup):
## formatting to turn into correct json
colors = ["red","orange", "yellow", "green", "blue"]
timeline = {"label": "usTimeline", "times": []}
addEvent={"color":"blue", "description":"description", "starting_time": 1}
## Must be in a certain format have to put in a array and then a set...crying
outerMost = []
print soup
for n in soup:
print n
print type(n)
if n[1] != '':
print n[1]
millis = convertTime(n[1])
addEvent["starting_time"] = millis
if n[0].isdigit():
millis = convertTime(n[0])
addEvent["starting_time"] = millis
addEvent["description"] = n[2]
if addEvent["description"]!="description" and addEvent["starting_time"]!=1:
addEvent["color"]='orange'
print 'addingEvent'
print addEvent
timeline["times"].append(addEvent)
addEvent={"color":"blue", "description":"description", "starting_time": 1}
outerMost.append(timeline)
return outerMost
if __name__ == '__main__':
allInformation = readCsv()
newFormation = reformat(allInformation)
finalFormation = webToJson(newFormation)
convertToFile('usTimeline.json',finalFormation)
| bsd-3-clause | 5,171,126,104,977,085,000 | 23.13 | 79 | 0.688355 | false |
okey/aduana | example/example/settings.py | 2 | 1573 | #--------------------------------------------------------------------------
# Scrapy Settings
#--------------------------------------------------------------------------
BOT_NAME = 'example'
SPIDER_MODULES = ['example.spiders']
NEWSPIDER_MODULE = 'example.spiders'
HTTPCACHE_ENABLED = True
REDIRECT_ENABLED = True
COOKIES_ENABLED = False
DOWNLOAD_TIMEOUT = 10
RETRY_ENABLED = False
AJAXCRAWL_ENABLED = True
CONCURRENT_REQUESTS = 256
CONCURRENT_REQUESTS_PER_DOMAIN = 2
LOGSTATS_INTERVAL = 10
SPIDER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES = {}
#--------------------------------------------------------------------------
# Frontier Settings
#--------------------------------------------------------------------------
SPIDER_MIDDLEWARES.update(
{'frontera.contrib.scrapy.middlewares.schedulers.SchedulerSpiderMiddleware': 999},
)
DOWNLOADER_MIDDLEWARES.update(
{'frontera.contrib.scrapy.middlewares.schedulers.SchedulerDownloaderMiddleware': 999}
)
SCHEDULER = 'frontera.contrib.scrapy.schedulers.frontier.FronteraScheduler'
FRONTERA_SETTINGS = 'example.frontera.settings'
#--------------------------------------------------------------------------
# Seed loaders
#--------------------------------------------------------------------------
SPIDER_MIDDLEWARES.update({
'frontera.contrib.scrapy.middlewares.seeds.file.FileSeedLoader': 1,
})
SEEDS_SOURCE = 'seeds.txt'
#--------------------------------------------------------------------------
# Testing
#--------------------------------------------------------------------------
#CLOSESPIDER_PAGECOUNT = 1
| bsd-3-clause | 5,058,943,456,257,775,000 | 31.770833 | 89 | 0.478067 | false |
sapcc/monasca-notification | monasca_notification/plugins/jira_notifier.py | 1 | 8896 | # (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlparse
import jira
import yaml
from jinja2 import Template
from monasca_notification.monitoring import client
from monasca_notification.monitoring.metrics import NOTIFICATION_SEND_TIMER
from monasca_notification.plugins.abstract_notifier import AbstractNotifier
"""
Note:
This plugin doesn't support multi tenancy. Multi tenancy requires support for
multiple JIRA server url. JIRA doesn't support OAUTH2 tokens, we may need to get
the user credentials in query params and store them in monasca DB which we don't want to do.
That is the reason for not supporting true multitenancy.
MultiTenancy can be achieved by creating issues in different project for different tenant on
the same JIRA server.
notification.address = https://<jira_url>/?project=<project_name>
Dependency for Jira
1) Jira plugin requires Jira library. Consumers need to install
JIRA via pip
2) (i.e) pip install jira
Jira Configuration
1) jira:
username: username
password: password
Sample notification:
monasca notification-create MyIssuer JIRA https://jira.hpcloud.net/?project=MyProject
monasca notification-create MyIssuer1 JIRA https://jira.hpcloud.net/?project=MyProject&
component=MyComponent
"""
STATSD_CLIENT = client.get_client()
STATSD_TIMER = STATSD_CLIENT.get_timer()
class JiraNotifier(AbstractNotifier):
_search_query = search_query = "project={} and reporter='{}' and summary ~ '{}'"
def __init__(self, log):
super(JiraNotifier, self).__init__("jira")
self._log = log
self.jira_fields_format = None
def config(self, config_dict):
super(JiraNotifier, self).config(config_dict)
if not config_dict.get("user") and not config_dict.get("password"):
message = "Missing user and password settings in JIRA plugin configuration"
self._log.exception(message)
raise Exception(message)
self.jira_fields_format = self._get_jira_custom_format_fields()
def _get_jira_custom_format_fields(self):
jira_fields_format = None
if (not self.jira_fields_format and self._config.get("custom_formatter")):
try:
with open(self._config.get("custom_formatter")) as f:
jira_fields_format = yaml.safe_load(f)
except Exception:
self._log.exception("Unable to read custom_formatter file. Check file location")
raise
# Remove the top element
jira_fields_format = jira_fields_format["jira_format"]
return jira_fields_format
def _build_custom_jira_message(self, notification, jira_fields_format):
jira_fields = {}
# Templatize the message object
jira_field_summary_field = jira_fields_format.get("summary", None)
if jira_field_summary_field:
template = Template(jira_field_summary_field)
jira_fields["summary"] = template.render(notification=notification)
jira_field_comments_field = jira_fields_format.get("comments", None)
if jira_field_comments_field:
template = Template(jira_field_comments_field)
jira_fields["comments"] = template.render(notification=notification)
jira_field_description_field = jira_fields_format.get("description", None)
if jira_field_description_field:
template = Template(jira_field_description_field)
jira_fields["description"] = template.render(notification=notification)
return jira_fields
def _build_default_jira_message(self, notification):
"""Builds jira message body
"""
body = {'alarm_id': notification.alarm_id,
'alarm_definition_id': notification.raw_alarm['alarmDefinitionId'],
'alarm_name': notification.alarm_name,
'alarm_description': notification.raw_alarm['alarmDescription'],
'alarm_timestamp': notification.alarm_timestamp,
'state': notification.state,
'old_state': notification.raw_alarm['oldState'],
'message': notification.message,
'tenant_id': notification.tenant_id,
'metrics': notification.metrics}
jira_fields = {}
summary_format_string = "Monasca alarm for alarm_defintion {0} status changed to {1} for the alarm_id {2}"
jira_fields["summary"] = summary_format_string.format(notification.alarm_name,
notification.state,
notification.alarm_id)
jira_fields["comments"] = "{code}%s{code}" % (json.dumps(body, indent=3))
return jira_fields
def _build_jira_message(self, notification):
if self._config.get("custom_formatter"):
return self._build_custom_jira_message(notification, self.jira_fields_format)
return self._build_default_jira_message(notification)
@STATSD_TIMER.timed(NOTIFICATION_SEND_TIMER, dimensions={'notification_type': 'pagerduty'})
def send_notification(self, notification):
"""Creates or Updates an issue in Jira
"""
jira_fields = self._build_jira_message(notification)
parsed_url = urlparse.urlsplit(notification.address)
query_params = urlparse.parse_qs(parsed_url.query)
# URL without query params
url = urlparse.urljoin(notification.address, urlparse.urlparse(notification.address).path)
jira_fields["project"] = query_params["project"][0]
if query_params.get("component"):
jira_fields["component"] = query_params["component"][0]
auth = (self._config["user"], self._config["password"])
proxyDict = None
if (self._config.get("proxy")):
proxyDict = {"https": self._config.get("proxy")}
try:
jira_obj = jira.JIRA(url, basic_auth=auth, proxies=proxyDict)
self.jira_workflow(jira_fields, jira_obj, notification)
except Exception:
self._log.exception("Error creating issue in Jira at URL {}".format(url))
return False
return True
def jira_workflow(self, jira_fields, jira_obj, notification):
"""How does Jira plugin work?
1) Check whether the issue with same description exists?
2) If issue exists, and if it is closed state, open it
3) if the issue doesn't exist, then create the issue
4) Add current alarm details in comments
"""
issue_dict = {'project': {'key': jira_fields["project"]},
'summary': jira_fields["summary"],
'description': 'Monasca alaram',
'issuetype': {'name': 'Bug'}, }
# If the JIRA workflow is created with mandatory components
if jira_fields.get("component"):
issue_dict["components"] = [{"name": jira_fields.get("component")}]
search_term = self._search_query.format(issue_dict["project"]["key"],
self._config["user"], notification.alarm_id)
issue_list = jira_obj.search_issues(search_term)
if not issue_list:
self._log.debug("Creating an issue with the data {}".format(issue_dict))
issue = jira_obj.create_issue(fields=issue_dict)
else:
issue = issue_list[0]
self._log.debug("Found an existing issue {} for this notification".format(issue))
current_state = issue.fields.status.name
if current_state.lower() in ["resolved", "closed"]:
# Open the the issue
transitions = jira_obj.transitions(issue)
allowed_transistions = [(t['id'], t['name']) for t in transitions if "reopen" in t['name'].lower()]
if allowed_transistions:
# Reopen the issue
jira_obj.transition_issue(issue, allowed_transistions[0][0])
jira_comment_message = jira_fields.get("comments")
if jira_comment_message:
jira_obj.add_comment(issue, jira_comment_message)
| apache-2.0 | -2,488,763,707,057,027,000 | 40.962264 | 115 | 0.630283 | false |
DavidAndreev/indico | indico/MaKaC/webinterface/wcomponents.py | 1 | 44542 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import os
import exceptions
import urllib
import pkg_resources
from flask import session
from lxml import etree
from pytz import timezone
from speaklater import _LazyString
from datetime import timedelta
from xml.sax.saxutils import escape, quoteattr
from MaKaC.i18n import _
from MaKaC.common import info
from MaKaC.webinterface import urlHandlers
from MaKaC.common.url import URL
from indico.core.config import Config
from MaKaC.conference import Conference
from MaKaC.common.timezoneUtils import DisplayTZ
from MaKaC.common import utils
from MaKaC.errors import MaKaCError
from MaKaC.common.ContextHelp import ContextHelp
from MaKaC.common.contextManager import ContextManager
import MaKaC.common.TemplateExec as templateEngine
from indico.core import signals
from indico.core.db import DBMgr, db
from indico.modules.api import APIMode
from indico.modules.api import settings as api_settings
from indico.modules.events.layout import layout_settings, theme_settings
from indico.modules.legal import legal_settings
from indico.util.i18n import i18nformat, get_current_locale, get_all_locales
from indico.util.date_time import format_date
from indico.util.signals import values_from_signal
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
from indico.web.menu import HeaderMenuEntry
MIN_PRESENT_EVENTS = 6
OPTIMAL_PRESENT_EVENTS = 10
class WTemplated():
"""This class provides a basic implementation of a web component (an
object which generates HTML related to a certain feature or
functionality) which relies in a template file for generating the
HTML it's in charge of.
By templating file we mean that there will be a file in the file
system (uniquely identified) which will contain HTML code plus some
"variables" (dynamic values). The class will take care of opening
this file, parsing the HTML and replacing the variables by the
corresponding values.
"""
tplId = None
@classmethod
def forModule(cls, module, *args):
tplobj = cls(*args)
tplobj._for_module = module
return tplobj
def __init__(self, tpl_name=None):
if tpl_name is not None:
self.tplId = tpl_name
self._rh = ContextManager.get('currentRH', None)
def _getSpecificTPL(self, dir, tplId, extension="tpl"):
"""
            Checks if there is a defined set of specific templates (e.g. CERN),
and if there is a specific file for this page, for this template set.
Returns the file that should be used.
"""
if DBMgr.getInstance().isConnected():
template = info.HelperMaKaCInfo.getMaKaCInfoInstance().getDefaultTemplateSet()
            if template is not None:
                specTpl = "%s.%s.%s" % (tplId, template, extension)
                if os.path.exists(os.path.join(dir, specTpl)):
return specTpl
return "%s.%s" % (tplId, extension)
def _setTPLFile(self):
"""Sets the TPL (template) file for the object. It will try to get
from the configuration if there's a special TPL file for it and
if not it will look for a file called as the class name+".tpl"
in the configured TPL directory.
"""
cfg = Config.getInstance()
#file = cfg.getTPLFile(self.tplId)
# because MANY classes skip the constructor...
tplDir = cfg.getTPLDir()
if hasattr(self, '_for_module') and self._for_module:
self.tplFile = pkg_resources.resource_filename(self._for_module.__name__,
'tpls/{0}.tpl'.format(self.tplId))
else:
self.tplFile = self._getSpecificTPL(tplDir, self.tplId)
hfile = self._getSpecificTPL(os.path.join(tplDir,'chelp'),
self.tplId,
extension='wohl')
self.helpFile = os.path.join('chelp', hfile)
def getVars( self ):
"""Returns a dictionary containing the TPL variables that will
be passed at the TPL formating time. For this class, it will
return the configuration user defined variables.
Classes inheriting from this one will have to take care of adding
their variables to the ones returned by this method.
"""
self._rh = ContextManager.get('currentRH', None)
cfg = Config.getInstance()
vars = cfg.getTPLVars()
for paramName in self.__params:
vars[ paramName ] = self.__params[ paramName ]
return vars
def getHTML( self, params=None ):
"""Returns the HTML resulting of formating the text contained in
the corresponding TPL file with the variables returned by the
getVars method.
Params:
params -- additional paramters received from the caller
"""
self._rh = ContextManager.get('currentRH', None)
        if self.tplId is None:
self.tplId = self.__class__.__name__[1:]
self._setTPLFile()
self.__params = {}
        if params is not None:
self.__params = params
# include context help info, if it exists
helpText = None
if os.path.exists(self.helpFile):
            try:
                with open(self.helpFile, "r") as fh:
                    helpText = fh.read()
            except exceptions.IOError:
                pass
vars = self.getVars()
vars['__rh__'] = self._rh
vars['self_'] = self
tempHTML = templateEngine.render(self.tplFile, vars, self)
        if helpText is None:
return tempHTML
else:
try:
return ContextHelp().merge(self.tplId, tempHTML, helpText)
except etree.LxmlError, e:
if tempHTML.strip() == '':
raise MaKaCError(_("Template " + str(self.tplId) + " produced empty output, and it has a .wohl file. Error: " + str(e)))
else:
raise
@staticmethod
def htmlText(param):
if not param:
return ''
if not isinstance(param, (basestring, _LazyString)):
param = repr(param)
if isinstance(param, unicode):
param = param.encode('utf-8')
return escape(param)
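    # Example: htmlText('<b>"A & B"</b>') yields '&lt;b&gt;"A &amp; B"&lt;/b&gt;';
    # xml.sax.saxutils.escape only escapes &, < and > by default, so
    # quotes pass through unchanged.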
@staticmethod
def textToHTML(param):
if param != "":
if param.lower().find("<br>") == -1 and param.lower().find("<p>") == -1 and param.lower().find("<li>") == -1 and param.lower().find("<table") == -1:
param=param.replace("\r\n", "<br>")
param=param.replace("\n","<br>")
return param
return " "
def _escapeChars(self, text):
# Does nothing right now - it used to replace % with %% for the old-style templates
return text
class WHTMLHeader(WTemplated):
def __init__(self, tpl_name=None):
WTemplated.__init__(self)
class WHeader(WTemplated):
"""Templating web component for generating a common HTML header for
the web interface.
"""
def __init__(self, aw, locTZ="", isFrontPage=False, currentCategory=None, tpl_name=None, prot_obj=None):
WTemplated.__init__(self, tpl_name=tpl_name)
self._currentuser = aw.getUser()
self._locTZ = locTZ
self._aw = aw
self._isFrontPage = isFrontPage
self.__currentCategory = currentCategory
# The object for which to show the protection indicator
self._prot_obj = prot_obj
"""
Returns timezone string that is show to the user.
"""
def _getTimezoneDisplay( self, timezone ):
if timezone == 'LOCAL':
if self._locTZ:
return self._locTZ
else:
return Config.getInstance().getDefaultTimezone()
else:
return timezone
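    # Example: for a user whose session timezone is 'LOCAL' viewing an
    # event displayed in 'Europe/Zurich' (illustrative), this returns
    # 'Europe/Zurich'; an explicit timezone such as 'UTC' is returned
    # unchanged.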
def _get_protection_new(self, obj):
if not obj.is_protected:
return ['Public', _('Public')]
else:
networks = [x.name for x in obj.get_access_list() if x.is_network]
if networks:
return ['DomainProtected', _('{} network only').format('/'.join(networks))]
else:
return ["Restricted", _("Restricted")]
def _getProtection(self, target):
"""
Return a list with the status (Public, Protected, Restricted)
and extra info (domain list).
"""
if isinstance(target, Conference):
return self._get_protection_new(target.as_event)
elif isinstance(target, db.m.Category):
return self._get_protection_new(target)
else:
raise TypeError('Unexpected object: {}'.format(target))
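    # Example return values: ['Public', _('Public')] for an unprotected
    # target, ['DomainProtected', 'CERN network only'] when access is
    # restricted to IP networks (network name illustrative), and
    # ['Restricted', _('Restricted')] for other protected targets.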
def getVars( self ):
vars = WTemplated.getVars(self)
vars["currentUser"] = self._currentuser
config = Config.getInstance()
imgLogin = config.getSystemIconURL("login")
vars["imgLogin"] = imgLogin
vars["isFrontPage"] = self._isFrontPage
vars["currentCategory"] = self.__currentCategory
vars['prot_obj'] = self._prot_obj
current_locale = get_current_locale()
vars["ActiveTimezone"] = session.timezone
"""
Get the timezone for displaying on top of the page.
1. If the user has "LOCAL" timezone then show the timezone
of the event/category. If that's not possible just show the
standard timezone.
2. If the user has a custom timezone display that one.
"""
vars["ActiveTimezoneDisplay"] = self._getTimezoneDisplay(vars["ActiveTimezone"])
vars["SelectedLanguage"] = str(current_locale)
vars["SelectedLanguageName"] = current_locale.language_name
vars["Languages"] = get_all_locales()
if DBMgr.getInstance().isConnected():
vars["title"] = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTitle()
vars["organization"] = info.HelperMaKaCInfo.getMaKaCInfoInstance().getOrganisation()
else:
vars["title"] = "Indico"
vars["organization"] = ""
vars['roomBooking'] = Config.getInstance().getIsRoomBookingActive()
vars['protectionDisclaimerProtected'] = legal_settings.get('network_protected_disclaimer')
vars['protectionDisclaimerRestricted'] = legal_settings.get('restricted_disclaimer')
        # Build a list of items for the administration menu
adminItemList = []
if session.user and session.user.is_admin:
adminItemList.append({'id': 'serverAdmin', 'url': urlHandlers.UHAdminArea.getURL(),
'text': _("Server admin")})
vars["adminItemList"] = adminItemList
vars['extra_items'] = HeaderMenuEntry.group(values_from_signal(signals.indico_menu.send()))
vars["getProtection"] = self._getProtection
vars["show_contact"] = config.getPublicSupportEmail() is not None
return vars
class WConferenceHeader(WHeader):
"""Templating web component for generating the HTML header for
the conferences' web interface.
"""
def __init__(self, aw, conf):
self._conf = conf
self._aw = aw
WHeader.__init__(self, self._aw, prot_obj=self._conf, tpl_name='EventHeader')
tzUtil = DisplayTZ(self._aw,self._conf)
self._locTZ = tzUtil.getDisplayTZ()
def getVars( self ):
from indico.web.http_api.util import generate_public_auth_request
vars = WHeader.getVars( self )
vars["categurl"] = self._conf.as_event.category.url
vars["conf"] = vars["target"] = self._conf
vars["imgLogo"] = Config.getInstance().getSystemIconURL("miniLogo")
vars["MaKaCHomeURL"] = self._conf.as_event.category.url
# Default values to avoid NameError while executing the template
styles = theme_settings.get_themes_for("conference")
vars["viewoptions"] = [{'id': theme_id, 'name': data['title']}
for theme_id, data in sorted(styles.viewitems(), key=lambda x: x[1]['title'])]
vars["SelectedStyle"] = ""
vars["pdfURL"] = ""
vars["displayURL"] = str(urlHandlers.UHConferenceOtherViews.getURL(self._conf))
# Setting the buttons that will be displayed in the header menu
vars["showFilterButton"] = False
vars["showMoreButton"] = True
vars["showExportToICal"] = True
vars["showExportToPDF"] = False
vars["showDLMaterial"] = True
vars["showLayout"] = True
vars["displayNavigationBar"] = layout_settings.get(self._conf, 'show_nav_bar')
        # This is basically the same as WICalExportBase, but we need some
        # extra logic in order to build the detailed URLs.
apiMode = api_settings.get('security_mode')
vars["icsIconURL"] = str(Config.getInstance().getSystemIconURL("ical_grey"))
vars["apiMode"] = apiMode
vars["signingEnabled"] = apiMode in {APIMode.SIGNED, APIMode.ONLYKEY_SIGNED, APIMode.ALL_SIGNED}
vars["persistentAllowed"] = api_settings.get('allow_persistent')
user = self._aw.getUser()
apiKey = user.api_key if user else None
topURLs = generate_public_auth_request(apiKey, '/export/event/%s.ics' % self._conf.getId())
urls = generate_public_auth_request(apiKey, '/export/event/%s.ics' % self._conf.getId(),
{'detail': 'contributions'})
vars["requestURLs"] = {
'publicRequestURL': topURLs["publicRequestURL"],
'authRequestURL': topURLs["authRequestURL"],
'publicRequestDetailedURL': urls["publicRequestURL"],
'authRequestDetailedURL': urls["authRequestURL"]
}
vars["persistentUserEnabled"] = apiKey.is_persistent_allowed if apiKey else False
vars["apiActive"] = apiKey is not None
vars["userLogged"] = user is not None
tpl = get_template_module('api/_messages.html')
vars['apiKeyUserAgreement'] = tpl.get_ical_api_key_msg()
vars['apiPersistentUserAgreement'] = tpl.get_ical_persistent_msg()
return vars
class WMenuConferenceHeader( WConferenceHeader ):
"""Templating web component for generating the HTML header for
the conferences' web interface with a menu
"""
def __init__(self, aw, conf):
self._conf = conf
self._aw=aw
WConferenceHeader.__init__(self, self._aw, conf)
def getVars( self ):
vars = WConferenceHeader.getVars( self )
vars["categurl"] = self._conf.as_event.category.url
# Dates Menu
tz = DisplayTZ(self._aw,self._conf,useServerTZ=1).getDisplayTZ()
sdate = self._conf.getStartDate().astimezone(timezone(tz))
edate = self._conf.getEndDate().astimezone(timezone(tz))
dates = []
if sdate.strftime("%Y-%m-%d") != edate.strftime("%Y-%m-%d"):
selected = ""
if vars.has_key("selectedDate"):
selectedDate = vars["selectedDate"]
if selectedDate == "all" or selectedDate == "":
selected = "selected"
else:
selectedDate = "all"
dates = [ i18nformat(""" <select name="showDate" onChange="document.forms[0].submit();" style="font-size:8pt;"><option value="all" %s>- - _("all days") - -</option> """)%selected]
while sdate.strftime("%Y-%m-%d") <= edate.strftime("%Y-%m-%d"):
selected = ""
if selectedDate == sdate.strftime("%d-%B-%Y"):
selected = "selected"
d = sdate.strftime("%d-%B-%Y")
dates.append(""" <option value="%s" %s>%s</option> """%(d, selected, d))
sdate = sdate + timedelta(days=1)
dates.append("</select>")
else:
dates.append("""<input type="hidden" name="showDate" value="all">""")
# Sessions Menu
sessions = []
if len(self._conf.getSessionList()) != 0:
selected = ""
if vars.has_key("selectedSession"):
selectedSession = vars["selectedSession"]
if selectedSession == "all" or selectedSession == "":
selected = "selected"
else:
selectedSession = "all"
sessions = [ i18nformat(""" <select name="showSession" onChange="document.forms[0].submit();" style="font-size:8pt;"><option value="all" %s>- - _("all sessions") - -</option> """)%selected]
for session in self._conf.getSessionList():
selected = ""
sid = session.friendly_id
if sid == selectedSession:
selected = "selected"
sessions.append(""" <option value="%s" %s>%s</option> """ % (sid, selected, session.title))
sessions.append("</select>")
else:
sessions.append("""<input type="hidden" name="showSession" value="all">""")
# Handle hide/show contributions option
        hideContributions = None
        if len(self._conf.getSessionList()) != 0:
            if "detailLevel" in vars:
                if vars["detailLevel"] == "session":
                    hideContributions = "checked"
                else:
                    hideContributions = ""
        # Pass the current choice on to the template
vars["hideContributions"] = hideContributions
urlCustPrint = urlHandlers.UHConferenceOtherViews.getURL(self._conf)
urlCustPrint.addParam("showDate", vars.get("selectedDate") or "all")
urlCustPrint.addParam("showSession", vars.get("selectedSession") or "all")
urlCustPrint.addParam("fr", "no")
urlCustPrint.addParam("view", vars["currentView"])
vars["printURL"]=str(urlCustPrint)
vars["printIMG"] = quoteattr(str(Config.getInstance().getSystemIconURL("printer")))
vars["pdfURL"] = quoteattr(url_for('timetable.export_pdf', self._conf))
vars["pdfIMG"] = quoteattr(str(Config.getInstance().getSystemIconURL("pdf")))
vars["zipIMG"] = quoteattr(str(Config.getInstance().getSystemIconURL("smallzip")))
return vars
class WMenuMeetingHeader( WConferenceHeader ):
"""Templating web component for generating the HTML header for
        the meetings' web interface with a menu
"""
def __init__(self, aw, conf):
self._conf = conf
self._aw=aw
WHeader.__init__(self, self._aw, prot_obj=self._conf, tpl_name='EventHeader')
tzUtil = DisplayTZ(self._aw,self._conf)
self._locTZ = tzUtil.getDisplayTZ()
def getVars( self ):
vars = WConferenceHeader.getVars( self )
vars["categurl"] = self._conf.as_event.category.url
view_options = [{'id': tid, 'name': data['title']} for tid, data in
sorted(theme_settings.get_themes_for(vars["type"]).viewitems(), key=lambda x: x[1]['title'])]
vars["viewoptions"] = view_options
vars["SelectedStyle"] = theme_settings.themes[vars['currentView']]['title']
vars["displayURL"] = urlHandlers.UHConferenceDisplay.getURL(self._rh._conf)
# Setting the buttons that will be displayed in the header menu
vars["showFilterButton"] = True
vars["showExportToPDF"] = True
vars["showDLMaterial"] = True
vars["showLayout"] = True
# Dates Menu
tz = DisplayTZ(self._aw,self._conf,useServerTZ=1).getDisplayTZ()
sdate = self._conf.getStartDate().astimezone(timezone(tz))
edate = self._conf.getEndDate().astimezone(timezone(tz))
selected = ""
if vars.has_key("selectedDate"):
selectedDate = vars["selectedDate"]
if selectedDate == "all" or selectedDate == "":
selected = "selected"
else:
selectedDate = "all"
dates = [ i18nformat(""" <option value="all" %s>- - _("all days") - -</option> """)%selected]
while sdate.date() <= edate.date():
iso_date = sdate.date().isoformat()
selected = 'selected' if selectedDate == iso_date else ''
dates.append('<option value="{}" {}>{}</option>'.format(iso_date, selected, format_date(sdate)))
sdate = sdate + timedelta(days=1)
vars["datesMenu"] = "".join(dates);
# Sessions Menu
selected = ""
if vars.has_key("selectedSession"):
selectedSession = vars["selectedSession"]
if selectedSession == "all" or selectedSession == "":
selected = "selected"
else:
selectedSession = "all"
sessions = [ i18nformat(""" <option value="all" %s>- - _("all sessions") - -</option> """)%selected]
for session_ in self._conf.as_event.sessions:
selected = "selected" if unicode(session_.friendly_id) == selectedSession else ''
title = session_.title
if len(title) > 60:
title = title[0:40] + u"..."
sessions.append(""" <option value="%s" %s>%s</option> """ % (session_.friendly_id, selected,
title.encode('utf-8')))
vars["sessionsMenu"] = "".join(sessions)
# Handle hide/show contributions option
hideContributions = None;
if len(self._conf.getSessionList()) != 0:
if vars.has_key("detailLevel"):
if vars["detailLevel"] == "session":
hideContributions = "checked"
else:
hideContributions = ""
vars["hideContributions"] = hideContributions
urlCustPrint = urlHandlers.UHConferenceOtherViews.getURL(self._conf)
urlCustPrint.addParam("showDate", vars.get("selectedDate") or "all")
urlCustPrint.addParam("showSession", vars.get("selectedSession") or "all")
urlCustPrint.addParam("detailLevel", vars.get("detailLevel") or "all")
urlCustPrint.addParam("fr", "no")
urlCustPrint.addParam("view", vars["currentView"])
vars["printURL"]=str(urlCustPrint)
vars["pdfURL"] = url_for('timetable.export_pdf', self._conf)
return vars
class WMenuSimpleEventHeader( WMenuMeetingHeader ):
"""Templating web component for generating the HTML header for
the simple event' web interface with a menu
"""
def getVars( self ):
vars = WMenuMeetingHeader.getVars( self )
# Setting the buttons that will be displayed in the header menu
vars["showFilterButton"] = False
vars["showExportToPDF"] = False
vars["accessWrapper"] = self._aw
return vars
class WFooter(WTemplated):
"""Templating web component for generating a common HTML footer for the
web interface.
"""
def __init__(self, tpl_name = None, isFrontPage = False):
WTemplated.__init__(self, tpl_name)
self._isFrontPage = isFrontPage
def getVars( self ):
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase
vars = WTemplated.getVars(self)
vars["isFrontPage"] = self._isFrontPage
event = getattr(self._rh, '_conf', None)
vars['is_meeting'] = event and event.getType() == 'meeting' and not isinstance(self._rh, RHConferenceModifBase)
if not vars.has_key("modificationDate"):
vars["modificationDate"] = ""
if not vars.has_key("shortURL"):
vars["shortURL"] = ""
return vars
class WEventFooter(WFooter):
"""
Specialization of WFooter that provides extra info for events
"""
def __init__(self, conf, tpl_name = None, isFrontPage = False):
WFooter.__init__(self, tpl_name, isFrontPage)
self._conf = conf
self._event = conf.as_event
def _gCalDateFormat(self, dtime):
return dtime.strftime("%Y%m%dT%H%M%SZ")
def getVars(self):
v = WFooter.getVars(self)
cid = self._conf.getUrlTag().strip() or self._conf.getId()
location = self._event.venue_name
if self._event.room_name:
location = u'{} ({})'.format(self._event.room_name, location)
description = self._conf.getDescription()
if len(description) > 1000:
description = description[:997] + "..."
if description:
description += '\n\n'
description += Config.getInstance().getShortEventURL() + cid
v['gc_params'] = urllib.urlencode({
'action': 'TEMPLATE',
'text': self._conf.getTitle(),
'dates': "%s/%s" % (self._gCalDateFormat(self._conf.getStartDate()),
self._gCalDateFormat(self._conf.getEndDate())),
'details': description,
'location': location.encode('utf-8'),
'trp': False,
'sprop': [str(urlHandlers.UHConferenceDisplay.getURL(self._conf)),
'name:indico']
})
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
app_data = minfo.getSocialAppConfig()
v["shortURL"] = Config.getInstance().getShortEventURL() + cid
v["app_data"] = app_data
v["showSocial"] = app_data.get('active', False) and layout_settings.get(self._conf, 'show_social_badges')
v['conf'] = self._conf
return v
class WNavigationDrawer(WTemplated):
def __init__( self, pars, bgColor = None, type = None):
self._target = pars["target"]
self._isModif = pars.get("isModif", False)
self._track = pars.get("track", None) #for abstracts viewed inside a track
self._bgColor = bgColor
self._actionType = type #type of action
def getVars( self ):
vars = WTemplated.getVars( self )
vars["target"] = self._target
vars["isModif"]= self._isModif
vars["track"]= self._track
vars["bgColor"] = self._bgColor
vars["actionType"] = self._actionType
return vars
def getHTML(self, params=None):
return WTemplated.getHTML(self, params)
class WSimpleNavigationDrawer(WTemplated):
def __init__( self, title, handler = None, bgColor = None, **pars ):
self._urlHandler = handler
self._pars = pars
self._title = title
self._bgColor = bgColor
def getVars( self ):
vars = WTemplated.getVars( self )
vars["urlHandler"] = self._urlHandler
vars["title"] = self._title
vars["pars"] = self._pars
vars["bgColor"] = self._bgColor
return vars
def getHTML(self, params=None):
return WTemplated.getHTML(self, params)
class WBannerModif(WTemplated):
def __init__(self, path = [], itemType = "", title = ""):
WTemplated.__init__( self, "BannerModif" )
self._path = path
self._title = title
self._type = itemType
def getHTML(self):
""" Retrieves the HTML of the banner of the modification interface
of the given target event / category / contribution / abstract / etc.
'track' argument should be provided for abstracts viewed inside a track.
If originUrl and originPageTitle is set then this link his added to the end
of the breadcrumb showed in the banner.
"""
return WTemplated.getHTML(self, {"type" : self._type, "path": self._path, "title": self._title})
class WListOfPapersToReview(WBannerModif):
def __init__(self, target, user ):
## PATH
# Iterate till conference is reached
conf = target.event_new.as_legacy
if user == "referee":
path = [{"url": urlHandlers.UHConfModifListContribToJudge.getURL(conf), "title":_("Contributions list")}]
if user == "reviewer":
path = [{"url": urlHandlers.UHConfModifListContribToJudgeAsReviewer.getURL(conf), "title":_("Contributions list")}]
if user == "editor":
path = [{"url": urlHandlers.UHConfModifListContribToJudgeAsEditor.getURL(conf), "title":_("Contributions list")}]
# TITLE AND TYPE
itemType = type(target).__name__
title = target.title
WBannerModif.__init__(self, path, itemType, title)
class WNotifTplBannerModif(WBannerModif):
def __init__( self, target ):
path = [{"url": urlHandlers.UHAbstractReviewingNotifTpl.getURL(target), "title":_("Notification template list")}]
itemType="Notification Template"
title=target.getName()
WBannerModif.__init__(self, path, itemType, title)
class WAbstractBannerModif(WBannerModif):
def __init__( self, target ):
path = [{"url": urlHandlers.UHConfAbstractManagment.getURL(target), "title":_("Abstracts list")}]
itemType="Abstract"
title=target.getTitle()
WBannerModif.__init__(self, path, itemType, title)
class WTrackBannerModif(WBannerModif):
def __init__( self, track, abstract=None, isManager = False ):
path = []
target = track
if abstract:
path.append({"url": urlHandlers.UHTrackModifAbstracts.getURL(track), "title":_("Abstract list")})
if isManager:
path.append({"url": urlHandlers.UHConfModifProgram.getURL(track.getConference()), "title":_("Track list")})
itemType=type(target).__name__
title=target.getTitle()
WBannerModif.__init__(self, path, itemType, title)
class WConferenceModifFrame(WTemplated):
def __init__( self, conference, aw,):
self.__conf = conference
self._aw = aw
def getHTML( self, body, **params ):
params["body"] = body
return WTemplated.getHTML( self, params )
def getVars( self ):
vars = WTemplated.getVars( self )
vars["conf"] = self.__conf
vars["startDate"] = utils.formatDateTime(self.__conf.getAdjustedStartDate(), format="d MMM")
vars["endDate"] = utils.formatDateTime(self.__conf.getAdjustedEndDate(), format="d MMM")
return vars
class WConfirmation(WTemplated):
def getHTML(self, message, postURL, passingArgs, loading=False, severity="warning", **opts):
params = {}
params["message"] = message
params["postURL"] = postURL
params["severity"] = severity
params["passingArgs"] = passingArgs
params["loading"] = loading
params["confirmButtonCaption"] = opts.get("confirmButtonCaption", _("Yes"))
params["cancelButtonCaption"] = opts.get("cancelButtonCaption", _("Cancel"))
params["systemIconWarning"] = Config.getInstance().getSystemIconURL("warning")
return WTemplated.getHTML(self, params)
class WClosed(WTemplated):
pass
class TabControl:
def __init__( self, parent=None, child=None ):
self._tabs = []
self._active = None
self._default = None
# Parent element (another tabcontrol),
# in case there is nesting
self._parent = parent
if parent != None:
parent.setChild(self);
self._child = child
def _addTab( self, tab ):
self._tabs.append( tab )
if len( self._tabs ) == 1:
self._default = tab
self._active = tab
def newTab( self, id, caption, url, hidden=False, className="" ):
tab = Tab( self, id, caption, url, hidden=hidden, className=className )
self._addTab( tab )
return tab
def setDefaultTab( self, tab ):
if tab in self._tabs:
self._default = tab
def getDefaultTab( self ):
return self._default
def setActiveTab( self, tab ):
if tab in self._tabs:
self._active = tab
def getActiveTab( self ):
return self._active
def getTabList( self ):
return self._tabs
def getTabById( self, id ):
for tab in self.getTabList():
if tab.getId() == id:
return tab
return None
def getParent( self ):
# retrieve parent TabControl
return self._parent
def setChild( self, child ):
self._child = child
def getChild( self ):
# retrieve child TabControl
return self._child
def getLevel( self ):
tmp = self.getParent()
level = 0
while tmp:
level += 1
tmp = tmp.getParent()
return level
class Tab:
def __init__( self, owner, id, caption, URL, hidden = False, className="" ):
self._owner = owner
self._id = id.strip()
self._caption = caption.strip()
self._url = URL
self._enabled = True
self._subtabControl=None
self._hidden = hidden
self._className = className
def __repr__(self):
return '<Tab(%s, %s, %s, %s)>' % (self._id, self._caption, self._url, int(self.isActive()))
def getId( self ):
return self._id
def getCaption( self ):
return self._caption
def setCaption( self, cp):
self._caption = cp
def getURL( self ):
return self._url
def setDefault( self ):
self._owner.setDefaultTab( self )
def isDefault( self ):
return self._owner.getDefaultTab() == self
def isActive( self ):
return self._owner.getActiveTab() == self
def setActive( self ):
self._owner.setActiveTab( self )
def enable( self ):
self._enabled = True
def disable( self ):
self._enabled = False
def setEnabled(self,value):
self._enabled=value
def isEnabled( self ):
return self._enabled
def setHidden(self, value):
self._hidden = value
def isHidden( self ):
return self._hidden
def getSubTabControl(self):
return self._subtabControl
def newSubTab( self, id, caption, url ):
# guarantee that a subtabControl exists
if not self._subtabControl:
self._subtabControl = TabControl(parent=self._owner)
tab=self._subtabControl.newTab( id, caption, url )
return tab
def hasChildren(self):
return self._subtabControl is not None
def getClassName(self):
return self._className
class WTabControl(WTemplated):
def __init__(self, ctrl, accessWrapper, **params):
self._tabCtrl = ctrl
self._aw = accessWrapper
def _getTabs(self):
tabs = []
for tab in self._tabCtrl.getTabList():
if (not tab.isEnabled() or tab.isHidden()) and not tab.isActive():
# The active tab may never be skipped. If we skipped it jQuery would consider the first tab active and
# send an AJAX request to load its contents, which would break the whole page.
continue
tabs.append((tab.getCaption(), tab.getURL(), tab.isActive(), tab.getClassName()))
return tabs
def _getActiveTabId(self):
skipped = 0
for i, tab in enumerate(self._tabCtrl.getTabList()):
if tab.isActive():
return i - skipped
if not tab.isEnabled() or tab.isHidden():
skipped += 1
return 0
def _getActiveTab(self):
for tab in self._tabCtrl.getTabList():
if tab.isActive():
return tab
def _getBody(self):
tab = self._getActiveTab()
if not tab:
return self._body
sub = tab.getSubTabControl()
if not sub:
return self._body
return WTabControl(sub, self._aw).getHTML(self._body)
def getHTML(self, body):
self._body = body
return WTemplated.getHTML(self)
def getVars( self ):
vars = WTemplated.getVars(self)
vars['body'] = self._getBody()
vars['tabs'] = self._getTabs()
vars['activeTab'] = self._getActiveTabId()
vars['tabControlId'] = id(self)
return vars
class WAdminCreated(WTemplated):
def __init__(self, av):
self._av = av
class WAbstractModIntCommentEdit(WTemplated):
def __init__(self,comment):
self._comment=comment
def getVars(self):
vars=WTemplated.getVars(self)
vars["content"]=self.htmlText(self._comment.getContent())
return vars
class WAbstractModNewIntComment(WTemplated):
def __init__(self,aw,abstract):
self._aw=aw
self._abstract=abstract
def getVars(self):
vars=WTemplated.getVars(self)
return vars
class WAbstractModIntComments(WTemplated):
def __init__(self,aw,abstract):
self._aw=aw
self._abstract=abstract
def _getCommentsHTML(self,commentEditURLGen,commentRemURLGen):
res=[]
commentList = self._abstract.getIntCommentList()
for c in commentList:
mailtoSubject="[Indico] Abstract %s: %s"%(self._abstract.getId(), self._abstract.getTitle())
mailtoURL=URL("mailto:%s"%c.getResponsible().getEmail())
mailtoURL.addParam("subject", mailtoSubject)
responsible="""<a href=%s>%s</a>"""%(quoteattr(str(mailtoURL)),self.htmlText(c.getResponsible().getFullName()))
date=self.htmlText(c.getCreationDate().strftime("%Y-%m-%d %H:%M"))
buttonMod,buttonRem="",""
if self._aw.getUser()==c.getResponsible():
buttonMod= i18nformat("""
<form action=%s method="POST">
<td valign="bottom">
<input type="submit" class="btn" value="_("modify")">
</td>
</form>
""")%quoteattr(str(commentEditURLGen(c)))
buttonRem= i18nformat("""
<form action=%s method="POST">
<td valign="bottom">
<input type="submit" class="btn" value="_("remove")">
</td>
</form>
""")%quoteattr(str(commentRemURLGen(c)))
res.append("""
<tr>
<td bgcolor="white" style="border-top:1px solid #777777;border-bottom:1px solid #777777;">
<table>
<tr>
<td width="100%%">%s on %s</td>
</tr>
<tr>
<td>%s</td>
%s
%s
</tr>
</table>
</td>
</tr>"""%(responsible,date,c.getContent(),buttonMod,buttonRem))
if res == []:
res.append( i18nformat("""<tr><td align=\"center\" style=\"color:black\"><br>--_("no internal comments")--<br><br></td></tr>"""))
return "".join(res)
def getVars(self):
vars=WTemplated.getVars(self)
vars["comments"]=self._getCommentsHTML(vars["commentEditURLGen"],vars["commentRemURLGen"])
vars["newCommentURL"]=quoteattr(str(vars["newCommentURL"]))
return vars
class WAbstractModMarkAsDup(WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=WTemplated.getVars(self)
vars["duplicateURL"]=quoteattr(str(vars["duplicateURL"]))
vars["cancelURL"]=quoteattr(str(vars["cancelURL"]))
return vars
class WAbstractModUnMarkAsDup(WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=WTemplated.getVars(self)
vars["unduplicateURL"]=quoteattr(str(vars["unduplicateURL"]))
vars["cancelURL"]=quoteattr(str(vars["cancelURL"]))
return vars
#--------------------------------------------------------------------------------------
class WConfModMoveContribsToSessionConfirmation(WTemplated):
def __init__(self,conf,contribIdList=[],targetSession=None):
self._conf=conf
self._contribIdList=contribIdList
self._targetSession=targetSession
def _getWarningsHTML(self):
wl=[]
for id in self._contribIdList:
contrib=self._conf.getContributionById(id)
if contrib is None:
continue
spkList=[]
for spk in contrib.getSpeakerList():
spkList.append(self.htmlText(spk.getFullName()))
spkCaption=""
if len(spkList)>0:
spkCaption=" by %s"%"; ".join(spkList)
if (contrib.getSession() is not None and \
contrib.getSession()!=self._targetSession):
scheduled=""
if contrib.isScheduled():
scheduled= i18nformat(""" _("and scheduled") (%s)""")%self.htmlText(contrib.getStartDate().strftime("%Y-%b-%d %H:%M"))
wl.append( i18nformat("""
<li>%s-<i>%s</i>%s: is <font color="red"> _("already in session") <b>%s</b>%s</font></li>
""")%(self.htmlText(contrib.getId()),
self.htmlText(contrib.getTitle()),
spkCaption,
self.htmlText(contrib.getSession().getTitle()),
scheduled))
if (contrib.getSession() is None and \
self._targetSession is not None and \
contrib.isScheduled()):
wl.append( i18nformat("""
<li>%s-<i>%s</i>%s: is <font color="red"> _("scheduled") (%s)</font></li>
""")%(self.htmlText(contrib.getId()),
self.htmlText(contrib.getTitle()),
spkCaption,
self.htmlText(contrib.getStartDate().strftime("%Y-%b-%d %H:%M"))))
return "<ul>%s</ul>"%"".join(wl)
def getVars(self):
vars=WTemplated.getVars(self)
vars["postURL"]=quoteattr(str(vars["postURL"]))
vars["systemIconWarning"]=Config.getInstance().getSystemIconURL("warning")
vars["contribIdList"]=", ".join(self._contribIdList)
vars["targetSession"]="--none--"
if self._targetSession is not None:
vars["targetSession"]=self.htmlText("%s"%self._targetSession.getTitle())
vars["warnings"]=self._getWarningsHTML()
vars["targetSessionId"]=quoteattr("--none--")
if self._targetSession is not None:
vars["targetSessionId"]=quoteattr(str(self._targetSession.getId()))
l=[]
for id in self._contribIdList:
l.append("""<input type="hidden" name="contributions" value=%s">"""%quoteattr(str(id)))
vars["contributions"]="\n".join(l)
return vars
class WConfTickerTapeDrawer(WTemplated):
def __init__(self,conf, tz=None):
self._conf = conf
self._tz = tz
def getSimpleText( self ):
if layout_settings.get(self._conf, 'show_announcement'):
return layout_settings.get(self._conf, 'announcement')
class WFilterCriteria(WTemplated):
"""
Draws the options for a filter criteria object
This means rendering the actual table that contains
all the HTML for the several criteria
"""
def __init__(self, options, filterCrit, extraInfo=""):
WTemplated.__init__(self, tpl_name = "FilterCriteria")
self._filterCrit = filterCrit
self._options = options
self._extraInfo = extraInfo
def _drawFieldOptions(self, formName, form):
raise Exception("Method WFilterCriteria._drawFieldOptions must be overwritten")
def getVars(self):
vars = WTemplated.getVars( self )
vars["extra"] = self._extraInfo
vars["content"] = list((name, self._drawFieldOptions(name, form))
for (name, form) in self._options)
return vars
| gpl-3.0 | -5,558,711,386,967,988,000 | 35.995017 | 202 | 0.586009 | false |
aaabhilash97/google-python-exercises | basic/string2.py | 1 | 2740 | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
if len(s)<3:
return s
elif s[len(s)-3:]=='ing':
return s+'ly'
else:
return s+'ing'
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
st=s.find(' not ')
lt=s.find(' bad')
if st<lt and st!=-1 and lt!=-1:
return s[:st]+' good'+s[lt+4:]
else:
return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
if len(a)%2==0:
a_f=a[:len(a)/2]
a_b=a[len(a)/2:]
else:
a_f=a[:len(a)/2+1]
a_b=a[len(a)/2+1:]
if len(b)%2==0:
b_f=b[:len(b)/2]
b_b=b[len(b)/2:]
else:
b_f=b[:len(b)/2+1]
b_b=b[len(b)/2+1:]
return a_f+b_f+a_b+b_b
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | -1,827,243,979,055,912,400 | 26.959184 | 77 | 0.637591 | false |
zhaochl/python-utils | agrith_util/graph/g.py | 1 | 9283 | #!/usr/bin/env python
# coding=utf-8
import pygraphviz as pgv
from pdb import *
gdata = [
['A', '->', 'B',1],
['A', '->', 'C',1],
['B', '->', 'C',1],
['B', '->', 'D',1],
['C', '->', 'D',1],
['D', '->', '',1],
]
graph = {
'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D':[]
}
def gen_edge_relations(graph):
graph_edge = {}
for node,neighbour_list in graph.iteritems():
node_relations = {}
for nb in neighbour_list:
node_relations[nb] = 1
#print node,neighbour_list,node_relations
if not graph_edge.has_key(node):
graph_edge[node] = node_relations
return graph_edge
def check_is_edge(graph_edge,_from,_to):
is_edge = False
if len(graph_edge)>0:
if graph_edge.has_key(_from):
node_relations = graph_edge[_from]
if node_relations.has_key(_to):
is_edge = True
return is_edge
def width_order_graph(graph):
graph_edge = gen_edge_relations(graph)
def BFS(node):
print(node)
visited[node] = 1
for _node,_node_relations in graph.iteritems():
if check_is_edge(graph_edge,node,_node) and not visited.has_key(_node):
BFS(_node)
visited = {}
#set_trace()
for node,node_relations in graph.iteritems():
if not visited.has_key(node):
print 'start BFS:',node
BFS(node)
print visited
def depth_order_graph(graph):
graph_edge = gen_edge_relations(graph)
def DFS(node,queue):
queue.append(node)
print(node)
visited[node] = 1
if len(queue) != 0:
q_node = queue.pop()
for _node,_node_relations in graph.iteritems():
if check_is_edge(graph_edge,q_node,_node) and not visited.has_key(_node):
DFS(_node, queue)
visited = {}
queue = []
for node,node_relations in graph.iteritems():
if not visited.has_key(node):
DFS(node,queue)
def build_graph(data):
graph = {}
for r in gdata:
_from = r[0]
to = r[2]
status = r[3]
if status!=1:
continue
if _from=='D':
set_trace()
if not graph.has_key(_from):
graph[_from] = [to]
else:
graph[_from].append(to)
return graph
def add_node(graph,_from,to):
#set_trace()
if len(graph)>0:
if not graph.has_key(_from):
graph[_from] = [to]
else:
graph[_from].append(to)
#fix add leaf node
if not graph.has_key(to):
graph[to] = []
else:
graph[_from] =[to]
return graph
def del_node(graph,_from,to):
if len(graph)>0:
#del edge
if graph.has_key(_from):
graph[_from].remove(to)
#del to -if leaf
if graph.has_key(to):
t = graph[to]
if len(t)==0:
graph.pop(to)
return graph
def find_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
for node in graph[start]:
if node not in path:
newpath = find_path(graph, node, end, path)
if newpath:
return newpath
return None
def find_path2 (graph, start, end, path=[]):
_path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
for node in graph[start]:
if node not in _path:
newpath = find_path(graph, node, end, _path)
if newpath:
return newpath
return None
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if not graph.has_key(start):
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
"""
find from ->to ,[from]->from_list
"""
def find_from_path(graph,to):
path = []
graph_edge = gen_edge_relations(graph)
for node,node_relations in graph_edge.iteritems():
if node_relations.has_key(to):
path.append(node)
return path
"""
find x->from ->to ,[from]+[x]->from_list
"""
def find_from_path_all(graph,to,path=[]):
if to not in path:
path = path+[to]
#print path,to in path
graph_edge = gen_edge_relations(graph)
for node,node_relations in graph_edge.iteritems():
if node_relations.has_key(to):
if not node in path:
path.append(node)
find_from_path_all(graph,node,path)
return path
"""
find x->from ->to ,[from]+[x]->from_list
"""
def find_from_path_all_depth_order_graph(graph,to):
graph_edge = gen_edge_relations(graph)
def DFS(node,queue):
queue.append(node)
#print(node)
if len(queue) != 0:
q_node = queue.pop()
for _node,_node_relations in graph_edge.iteritems():
if _node_relations.has_key(q_node) and not visited.has_key(_node):
visited[_node] = 1
#visited[_node] = 1
DFS(_node, queue)
visited = {}
queue = []
node = to
DFS(node,queue)
return visited
"""
find x->from ->to ,[from]+[x]->from_list
"""
def find_to_path_all_depth_order_graph(graph,_from):
def DFS(node,queue):
queue.append(node)
print(node)
visited[node]=1
if len(queue) != 0:
q_node = queue.pop()
for neighbour in graph[q_node]:
queue.append(neighbour)
#if graph.has_key(neighbour) and not visited.has_key(neighbour):
if graph.has_key(neighbour):
DFS(neighbour,queue)
visited = {}
queue = []
node = _from
DFS(node,queue)
return visited
def find_shortest_path(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if not graph.has_key(start):
return None
shortest = None
for node in graph[start]:
if node not in path:
newpath = find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
def gen_graph_png(graph,file_name):
A=pgv.AGraph(directed=True,strict=True)
for node,node_relations_list in graph.iteritems():
for neighbour in node_relations_list:
A.add_edge(node,neighbour)
A.graph_attr['epsilon']='0.001'
print A.string() # print dot file to standard output
A.write(file_name+'.dot')
A.layout('dot') # layout with dot
A.draw(file_name+'.png') # write to file
print 'success'
def graph_tree_test():
A=pgv.AGraph(directed=True,strict=True)
A.add_edge(1,2)
A.add_edge(1,3)
A.add_edge(2,4)
A.add_edge(2,5)
A.add_edge(5,6)
A.add_edge(5,7)
A.add_edge(3,8)
A.add_edge(3,9)
A.add_edge(8,10)
A.add_edge(8,11)
A.graph_attr['epsilon']='0.001'
print A.string() # print dot file to standard output
A.write('tree.dot')
A.layout('dot') # layout with dot
A.draw('tree.png') # write to file
print 'success'
def test_find(graph):
print '--find A-E one path--'
t= find_path(graph, 'A', 'E')
print t
t= find_path2(graph, 'A', 'E')
print t
print '--find A-E all paths-'
t = find_all_paths(graph, 'A', 'E')
print t
print '--find A-E short path-'
t= find_shortest_path(graph, 'A', 'E')
print t
def test_find_from_path(graph):
print '--find from node directly--'
t=find_from_path(graph,'D')
print t
print '--find_from_path_all--'
t = find_from_path_all(graph,'D')
print t
t = find_from_path_all_depth_order_graph(graph,'D')
print t
t = find_to_path_all_depth_order_graph(graph,'A')
print t
print graph
def test_update(graph):
graph = add_node(graph,'D','E')
print graph
"""
{'A': ['B', 'C'], 'C': ['D'], 'B': ['C', 'D'], 'D': ['E']}
"""
print '--add_node-E-F-'
graph = add_node(graph,'E','F')
print '--del_node leaf-E-F'
del_node(graph,'E','F')
print graph
print '--del_node leaf-C-D'
del_node(graph,'C','D')
print graph
print '--del_node leaf-B-D'
del_node(graph,'B','D')
print graph
depth_order_graph(graph)
def test_order(graph):
print '--width_order_graph--'
width_order_graph(graph)
print '--depth_order_graph--'
depth_order_graph(graph)
def test_utils(graph):
g = build_graph(gdata)
print g
print '--gen all graph_edge relations-'
graph_edge = gen_edge_relations(graph)
print graph_edge
print '--check_is_edge--'
t= check_is_edge(graph_edge,'A','B')
print t
if __name__=='__main__':
#test_find_from_path(graph)
#graph_tree_test()
gen_graph_png(graph,'test')
| apache-2.0 | -5,159,982,726,066,468,000 | 26.302941 | 89 | 0.531078 | false |
edagar/censorship-analyser | tcpconnect.py | 1 | 1629 | # -*- coding: utf-8 -*-
from twisted.internet.error import ConnectionRefusedError
from ooni.utils import log
from ooni.templates import tcpt
from twisted.python import usage
class UsageOptions(usage.Options):
optParameters = [
['target', 't', None, 'Specify a single host to test.'],
['port', 'p', None, 'Specify port.']
]
class TCPConnect(tcpt.TCPTest):
usageOptions = UsageOptions
def setUp(self):
if self.input:
self.target = self.input
elif self.localOptions['target']:
self.target = self.localOptions['target']
else:
self.target = "www.torproject.org"
if self.localOptions['port']:
self.targetPort = int(self.localOptions['port'])
else:
self.targetPort = 443
self.report['host'] = self.target
self.report['port'] = self.targetPort
def test_hello(self):
"""
A TCP connection to torproject.org port 443 is attempted
"""
def got_response(response):
if response is not None:
self.report['TestStatus'] = 'OK'
self.report['response'] = response
def connection_failed(failure):
self.report['TestStatus'] = 'FAILED'
self.report['TestException'] = '%s' % failure.getErrorMessage()
failure.trap(ConnectionRefusedError)
self.address = self.target
self.port = self.targetPort
payload = "Hello \n\r"
d = self.sendPayload(payload)
d.addErrback(connection_failed)
d.addCallback(got_response)
return d
| bsd-3-clause | 8,497,879,571,738,538,000 | 29.166667 | 75 | 0.59423 | false |
chrislit/abydos | tests/distance/test_distance_canberra.py | 1 | 2809 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_canberra.
This module contains unit tests for abydos.distance.Canberra
"""
import unittest
from abydos.distance import Canberra
class CanberraTestCases(unittest.TestCase):
"""Test Canberra functions.
abydos.distance.Canberra
"""
cmp = Canberra()
def test_canberra_dist(self):
"""Test abydos.distance.Canberra.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.5)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.5)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.5)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.5)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.3333333333
)
def test_canberra_sim(self):
"""Test abydos.distance.Canberra.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.5)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.5)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.5)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.5)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.6666666667
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,225,034,253,349,493,200 | 36.453333 | 70 | 0.641865 | false |
sxjscience/tvm | python/tvm/topi/nn/conv2d_transpose.py | 1 | 6465 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import tvm
from tvm import te
from tvm import relay
from .dilate import dilate
from .pad import pad
from .util import get_pad_tuple
from ..util import simplify
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return declaration_conv2d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding=output_padding
)
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv2d_transpose the same as conv2d"""
batch, in_c, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_h, filter_w),
lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv2d transpose"""
data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_c = simplify(out_c)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dh, dw].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return Output
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 2D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 2D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
if attrs["data_layout"] == "NHWC":
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
# Convert Kernel layout to IOHW
# kernel_layout is different from input kernel layout - IO is swapped
if kernel_layout == "HWIO":
# input kernel layout is swapped to HWOI
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(3, 2, 0, 1))
elif kernel_layout == "HWOI":
# input kernel layout is swapped to HWIO
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(2, 3, 0, 1))
elif kernel_layout == "IOHW":
# input kernel layout is swapped to OIHW
# output kernel layout will be IOHW
kernel = relay.transpose(kernel, axes=(1, 0, 2, 3))
elif kernel_layout == "OIHW":
# input kernel layout is swapped to IOHW
# output kernel layout will be IOHW
pass
else:
# Skip legalize. Let relay.nn.conv2d_transpose to handle the case
return None
# Set new attrs for conv2d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCHW"
# layout of kernel should be IOHW, but kernel_layout should be swapped - OIHW
new_attrs["kernel_layout"] = "OIHW"
# Convert data to NCHW.
data = relay.transpose(data, axes=(0, 3, 1, 2))
deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
# Convert back to original NHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 1))
return out
return None
| apache-2.0 | -5,046,389,322,234,244,000 | 35.942857 | 98 | 0.636195 | false |
amitjamadagni/sympy | sympy/statistics/distributions.py | 1 | 14654 | from sympy.core import sympify, Lambda, Dummy, Integer, Rational, oo, Float, pi
from sympy.functions import sqrt, exp, erf
from sympy.printing import sstr
import random
class Sample(tuple):
"""
Sample([x1, x2, x3, ...]) represents a collection of samples.
Sample parameters like mean, variance and stddev can be accessed as
properties.
The sample will be sorted.
Examples
========
>>> from sympy.statistics.distributions import Sample
>>> Sample([0, 1, 2, 3])
Sample([0, 1, 2, 3])
>>> Sample([8, 3, 2, 4, 1, 6, 9, 2])
Sample([1, 2, 2, 3, 4, 6, 8, 9])
>>> s = Sample([1, 2, 3, 4, 5])
>>> s.mean
3
>>> s.stddev
sqrt(2)
>>> s.median
3
>>> s.variance
2
"""
def __new__(cls, sample):
s = tuple.__new__(cls, sorted(sample))
s.mean = mean = sum(s) / Integer(len(s))
s.variance = sum([(x - mean)**2 for x in s]) / Integer(len(s))
s.stddev = sqrt(s.variance)
if len(s) % 2:
s.median = s[len(s)//2]
else:
s.median = sum(s[len(s)//2 - 1:len(s)//2 + 1]) / Integer(2)
return s
def __repr__(self):
return sstr(self)
def __str__(self):
return sstr(self)
class ContinuousProbability(object):
"""Base class for continuous probability distributions"""
def probability(s, a, b):
"""
Calculate the probability that a random number x generated
from the distribution satisfies a <= x <= b
Examples
========
>>> from sympy.statistics import Normal
>>> from sympy.core import oo
>>> Normal(0, 1).probability(-1, 1)
erf(sqrt(2)/2)
>>> Normal(0, 1).probability(1, oo)
-erf(sqrt(2)/2)/2 + 1/2
"""
return s.cdf(b) - s.cdf(a)
def random(s, n=None):
"""
random() -- generate a random number from the distribution.
random(n) -- generate a Sample of n random numbers.
Examples
========
>>> from sympy.statistics import Uniform
>>> x = Uniform(1, 5).random()
>>> x < 5 and x > 1
True
>>> x = Uniform(-4, 2).random()
>>> x < 2 and x > -4
True
"""
if n is None:
return s._random()
else:
return Sample([s._random() for i in xrange(n)])
def __repr__(self):
return sstr(self)
def __str__(self):
return sstr(self)
class Normal(ContinuousProbability):
"""
Normal(mu, sigma) represents the normal or Gaussian distribution
with mean value mu and standard deviation sigma.
Examples
========
>>> from sympy.statistics import Normal
>>> from sympy import oo
>>> N = Normal(1, 2)
>>> N.mean
1
>>> N.variance
4
>>> N.probability(-oo, 1) # probability on an interval
1/2
>>> N.probability(1, oo)
1/2
>>> N.probability(-oo, oo)
1
>>> N.probability(-1, 3)
erf(sqrt(2)/2)
>>> _.evalf()
0.682689492137086
"""
def __init__(self, mu, sigma):
self.mu = sympify(mu)
self.sigma = sympify(sigma)
mean = property(lambda s: s.mu)
median = property(lambda s: s.mu)
mode = property(lambda s: s.mu)
stddev = property(lambda s: s.sigma)
variance = property(lambda s: s.sigma**2)
def pdf(s, x):
"""
Return the probability density function as an expression in x
Examples
========
>>> from sympy.statistics import Normal
>>> Normal(1, 2).pdf(0)
sqrt(2)*exp(-1/8)/(4*sqrt(pi))
>>> from sympy.abc import x
>>> Normal(1, 2).pdf(x)
sqrt(2)*exp(-(x - 1)**2/8)/(4*sqrt(pi))
"""
x = sympify(x)
return 1/(s.sigma*sqrt(2*pi)) * exp(-(x - s.mu)**2 / (2*s.sigma**2))
def cdf(s, x):
"""
Return the cumulative density function as an expression in x
Examples
========
>>> from sympy.statistics import Normal
>>> Normal(1, 2).cdf(0)
-erf(sqrt(2)/4)/2 + 1/2
>>> from sympy.abc import x
>>> Normal(1, 2).cdf(x)
erf(sqrt(2)*(x - 1)/4)/2 + 1/2
"""
x = sympify(x)
return (1 + erf((x - s.mu)/(s.sigma*sqrt(2))))/2
def _random(s):
return random.gauss(float(s.mu), float(s.sigma))
def confidence(s, p):
"""Return a symmetric (p*100)% confidence interval. For example,
p=0.95 gives a 95% confidence interval. Currently this function
only handles numerical values except in the trivial case p=1.
For example, one standard deviation:
>>> from sympy.statistics import Normal
>>> N = Normal(0, 1)
>>> N.confidence(0.68)
(-0.994457883209753, 0.994457883209753)
>>> N.probability(*_).evalf()
0.680000000000000
Two standard deviations:
>>> N = Normal(0, 1)
>>> N.confidence(0.95)
(-1.95996398454005, 1.95996398454005)
>>> N.probability(*_).evalf()
0.950000000000000
"""
if p == 1:
return (-oo, oo)
assert p <= 1
# In terms of n*sigma, we have n = sqrt(2)*ierf(p). The inverse
# error function is not yet implemented in SymPy but can easily be
# computed numerically
from sympy.mpmath import mpf, erfinv
# calculate y = ierf(p) by solving erf(y) - p = 0
y = erfinv(mpf(p))
t = Float(str(mpf(float(s.sigma)) * mpf(2)**0.5 * y))
mu = s.mu.evalf()
return (mu - t, mu + t)
@staticmethod
def fit(sample):
"""
Create a normal distribution fit to the mean and standard
deviation of the given distribution or sample.
Examples
========
>>> from sympy.statistics import Normal
>>> Normal.fit([1,2,3,4,5])
Normal(3, sqrt(2))
>>> from sympy.abc import x, y
>>> Normal.fit([x, y])
Normal(x/2 + y/2, sqrt((-x/2 + y/2)**2/2 + (x/2 - y/2)**2/2))
"""
if not hasattr(sample, "stddev"):
sample = Sample(sample)
return Normal(sample.mean, sample.stddev)
class Uniform(ContinuousProbability):
"""
Uniform(a, b) represents a probability distribution with uniform
probability density on the interval [a, b] and zero density
everywhere else.
"""
def __init__(self, a, b):
self.a = sympify(a)
self.b = sympify(b)
mean = property(lambda s: (s.a + s.b)/2)
median = property(lambda s: (s.a + s.b)/2)
mode = property(lambda s: (s.a + s.b)/2) # arbitrary
variance = property(lambda s: (s.b - s.a)**2 / 12)
stddev = property(lambda s: sqrt(s.variance))
def pdf(s, x):
"""
Return the probability density function as an expression in x
Examples
========
>>> from sympy.statistics import Uniform
>>> Uniform(1, 5).pdf(1)
1/4
>>> Uniform(2, 4).pdf(2)
1/2
"""
x = sympify(x)
if not x.is_Number:
raise NotImplementedError("SymPy does not yet support"
"piecewise functions")
if x < s.a or x > s.b:
return Rational(0)
return 1/(s.b - s.a)
def cdf(s, x):
"""
Return the cumulative density function as an expression in x
Examples
========
>>> from sympy.statistics import Uniform
>>> Uniform(1, 5).cdf(2)
1/4
>>> Uniform(1, 5).cdf(4)
3/4
"""
x = sympify(x)
if not x.is_Number:
raise NotImplementedError("SymPy does not yet support"
"piecewise functions")
if x <= s.a:
return Rational(0)
if x >= s.b:
return Rational(1)
return (x - s.a)/(s.b - s.a)
def _random(s):
return Float(random.uniform(float(s.a), float(s.b)))
def confidence(s, p):
"""Generate a symmetric (p*100)% confidence interval.
>>> from sympy import Rational
>>> from sympy.statistics import Uniform
>>> U = Uniform(1, 2)
>>> U.confidence(1)
(1, 2)
>>> U.confidence(Rational(1,2))
(5/4, 7/4)
"""
p = sympify(p)
assert p <= 1
d = (s.b - s.a)*p / 2
return (s.mean - d, s.mean + d)
@staticmethod
def fit(sample):
"""
Create a uniform distribution fit to the mean and standard
deviation of the given distribution or sample.
Examples
========
>>> from sympy.statistics import Uniform
>>> Uniform.fit([1, 2, 3, 4, 5])
Uniform(-sqrt(6) + 3, sqrt(6) + 3)
>>> Uniform.fit([1, 2])
Uniform(-sqrt(3)/2 + 3/2, sqrt(3)/2 + 3/2)
"""
if not hasattr(sample, "stddev"):
sample = Sample(sample)
m = sample.mean
d = sqrt(12*sample.variance)/2
return Uniform(m - d, m + d)
class PDF(ContinuousProbability):
"""
PDF(func, (x, a, b)) represents continuous probability distribution
with probability distribution function func(x) on interval (a, b)
If func is not normalized so that integrate(func, (x, a, b)) == 1,
it can be normalized using PDF.normalize() method
Examples
========
>>> from sympy import Symbol, exp, oo
>>> from sympy.statistics.distributions import PDF
>>> from sympy.abc import x
>>> a = Symbol('a', positive=True)
>>> exponential = PDF(exp(-x/a)/a, (x,0,oo))
>>> exponential.pdf(x)
exp(-x/a)/a
>>> exponential.cdf(x)
1 - exp(-x/a)
>>> exponential.mean
a
>>> exponential.variance
a**2
"""
def __init__(self, pdf, var):
#XXX maybe add some checking of parameters
if isinstance(var, (tuple, list)):
self.pdf = Lambda(var[0], pdf)
self.domain = tuple(var[1:])
else:
self.pdf = Lambda(var, pdf)
self.domain = (-oo, oo)
self._cdf = None
self._mean = None
self._variance = None
self._stddev = None
def normalize(self):
"""
Normalize the probability distribution function so that
integrate(self.pdf(x), (x, a, b)) == 1
Examples
========
>>> from sympy import Symbol, exp, oo
>>> from sympy.statistics.distributions import PDF
>>> from sympy.abc import x
>>> a = Symbol('a', positive=True)
>>> exponential = PDF(exp(-x/a), (x,0,oo))
>>> exponential.normalize().pdf(x)
exp(-x/a)/a
"""
norm = self.probability(*self.domain)
if norm != 1:
w = Dummy('w', real=True)
return self.__class__(self.pdf(w)/norm, (w, self.domain[0], self.domain[1]))
#self._cdf = Lambda(w, (self.cdf(w) - self.cdf(self.domain[0]))/norm)
#if self._mean is not None:
# self._mean /= norm
#if self._variance is not None:
# self._variance = (self._variance + (self._mean*norm)**2)/norm - self.mean**2
#if self._stddev is not None:
# self._stddev = sqrt(self._variance)
else:
return self
def cdf(self, x):
"""
Return the cumulative density function as an expression in x
Examples
========
>>> from sympy.statistics.distributions import PDF
>>> from sympy import exp, oo
>>> from sympy.abc import x, y
>>> PDF(exp(-x/y), (x,0,oo)).cdf(4)
y - y*exp(-4/y)
>>> PDF(2*x + y, (x, 10, oo)).cdf(0)
-10*y - 100
"""
x = sympify(x)
if self._cdf is not None:
return self._cdf(x)
else:
from sympy import integrate
w = Dummy('w', real=True)
self._cdf = integrate(self.pdf(w), w)
self._cdf = Lambda(
w, self._cdf - self._cdf.subs(w, self.domain[0]))
return self._cdf(x)
def _get_mean(self):
if self._mean is not None:
return self._mean
else:
from sympy import integrate
w = Dummy('w', real=True)
self._mean = integrate(
self.pdf(w)*w, (w, self.domain[0], self.domain[1]))
return self._mean
def _get_variance(self):
if self._variance is not None:
return self._variance
else:
from sympy import integrate, simplify
w = Dummy('w', real=True)
self._variance = integrate(self.pdf(
w)*w**2, (w, self.domain[0], self.domain[1])) - self.mean**2
self._variance = simplify(self._variance)
return self._variance
def _get_stddev(self):
if self._stddev is not None:
return self._stddev
else:
self._stddev = sqrt(self.variance)
return self._stddev
mean = property(_get_mean)
variance = property(_get_variance)
stddev = property(_get_stddev)
def _random(s):
raise NotImplementedError
def transform(self, func, var):
"""
Return a probability distribution of random variable func(x)
currently only some simple injective functions are supported
Examples
========
>>> from sympy.statistics.distributions import PDF
>>> from sympy import oo
>>> from sympy.abc import x, y
>>> PDF(2*x + y, (x, 10, oo)).transform(x, y)
PDF(0, ((_w,), x, x))
"""
w = Dummy('w', real=True)
from sympy import solve
from sympy import S
inverse = solve(func - w, var)
newPdf = S.Zero
funcdiff = func.diff(var)
#TODO check if x is in domain
for x in inverse:
# this assignment holds only for x in domain
# in general it would require implementing
# piecewise defined functions in sympy
newPdf += (self.pdf(var)/abs(funcdiff)).subs(var, x)
return PDF(newPdf, (w, func.subs(var, self.domain[0]), func.subs(var, self.domain[1])))
| bsd-3-clause | 5,455,225,138,428,859,000 | 27.621094 | 95 | 0.503071 | false |
lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pylint/test/unittest_checker_typecheck.py | 1 | 8353 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2014 Holger Peters <[email protected]>
# Copyright (c) 2015-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Derek Gustafson <[email protected]>
# Copyright (c) 2016 Filipe Brandenburger <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Unittest for the type checker."""
import sys
import pytest
import astroid
from pylint.checkers import typecheck
from pylint.testutils import CheckerTestCase, Message, set_config
def c_extension_missing():
"""Coverage module has C-extension, which we can reuse for test"""
try:
import coverage.tracer as _
return False
except ImportError:
_ = None
return True
needs_c_extension = pytest.mark.skipif(c_extension_missing(),
reason='Requires coverage (source of C-extension)')
class TestTypeChecker(CheckerTestCase):
"Tests for pylint.checkers.typecheck"
CHECKER_CLASS = typecheck.TypeChecker
def test_no_member_in_getattr(self):
"""Make sure that a module attribute access is checked by pylint.
"""
node = astroid.extract_node("""
import optparse
optparse.THIS_does_not_EXIST
""")
with self.assertAddsMessages(
Message(
'no-member',
node=node,
args=('Module', 'optparse', 'THIS_does_not_EXIST', ''))):
self.checker.visit_attribute(node)
@set_config(ignored_modules=('argparse',))
def test_no_member_in_getattr_ignored(self):
"""Make sure that a module attribute access check is omitted with a
module that is configured to be ignored.
"""
node = astroid.extract_node("""
import argparse
argparse.THIS_does_not_EXIST
""")
with self.assertNoMessages():
self.checker.visit_attribute(node)
@set_config(ignored_classes=('xml.etree.', ))
def test_ignored_modules_invalid_pattern(self):
node = astroid.extract_node('''
import xml
xml.etree.Lala
''')
message = Message('no-member', node=node,
args=('Module', 'xml.etree', 'Lala', ''))
with self.assertAddsMessages(message):
self.checker.visit_attribute(node)
@set_config(ignored_modules=('xml.etree*', ))
def test_ignored_modules_patterns(self):
node = astroid.extract_node('''
import xml
xml.etree.portocola #@
''')
with self.assertNoMessages():
self.checker.visit_attribute(node)
@set_config(ignored_classes=('xml.*', ))
def test_ignored_classes_no_recursive_pattern(self):
node = astroid.extract_node('''
import xml
xml.etree.ElementTree.Test
''')
message = Message('no-member', node=node,
args=('Module', 'xml.etree.ElementTree', 'Test', ''))
with self.assertAddsMessages(message):
self.checker.visit_attribute(node)
@set_config(ignored_classes=('optparse.Values', ))
def test_ignored_classes_qualified_name(self):
"""Test that ignored-classes supports qualified name for ignoring."""
node = astroid.extract_node('''
import optparse
optparse.Values.lala
''')
with self.assertNoMessages():
self.checker.visit_attribute(node)
@set_config(ignored_classes=('Values', ))
def test_ignored_classes_only_name(self):
"""Test that ignored_classes works with the name only."""
node = astroid.extract_node('''
import optparse
optparse.Values.lala
''')
with self.assertNoMessages():
self.checker.visit_attribute(node)
@set_config(suggestion_mode=False)
@needs_c_extension
def test_nomember_on_c_extension_error_msg(self):
node = astroid.extract_node('''
from coverage import tracer
tracer.CTracer #@
''')
message = Message('no-member', node=node,
args=('Module', 'coverage.tracer', 'CTracer', ''))
with self.assertAddsMessages(message):
self.checker.visit_attribute(node)
@set_config(suggestion_mode=True)
@needs_c_extension
def test_nomember_on_c_extension_info_msg(self):
node = astroid.extract_node('''
from coverage import tracer
tracer.CTracer #@
''')
message = Message('c-extension-no-member', node=node,
args=('Module', 'coverage.tracer', 'CTracer', ''))
with self.assertAddsMessages(message):
self.checker.visit_attribute(node)
@set_config(contextmanager_decorators=('contextlib.contextmanager',
'.custom_contextmanager'))
def test_custom_context_manager(self):
"""Test that @custom_contextmanager is recognized as configured."""
node = astroid.extract_node('''
from contextlib import contextmanager
def custom_contextmanager(f):
return contextmanager(f)
@custom_contextmanager
def dec():
yield
with dec():
pass
''')
with self.assertNoMessages():
self.checker.visit_with(node)
def test_invalid_metaclass(self):
module = astroid.parse('''
import six
class InvalidAsMetaclass(object):
pass
@six.add_metaclass(int)
class FirstInvalid(object):
pass
@six.add_metaclass(InvalidAsMetaclass)
class SecondInvalid(object):
pass
@six.add_metaclass(2)
class ThirdInvalid(object):
pass
''')
for class_obj, metaclass_name in (('ThirdInvalid', '2'),
('SecondInvalid', 'InvalidAsMetaclass'),
('FirstInvalid', 'int')):
classdef = module[class_obj]
message = Message('invalid-metaclass', node=classdef, args=(metaclass_name, ))
with self.assertAddsMessages(message):
self.checker.visit_classdef(classdef)
@pytest.mark.skipif(sys.version_info[0] < 3, reason='Needs Python 3.')
def test_invalid_metaclass_function_metaclasses(self):
module = astroid.parse('''
def invalid_metaclass_1(name, bases, attrs):
return int
def invalid_metaclass_2(name, bases, attrs):
return 1
class Invalid(metaclass=invalid_metaclass_1):
pass
class InvalidSecond(metaclass=invalid_metaclass_2):
pass
''')
for class_obj, metaclass_name in (('Invalid', 'int'), ('InvalidSecond', '1')):
classdef = module[class_obj]
message = Message('invalid-metaclass', node=classdef, args=(metaclass_name, ))
with self.assertAddsMessages(message):
self.checker.visit_classdef(classdef)
@pytest.mark.skipif(sys.version_info < (3, 5), reason='Needs Python 3.5.')
def test_typing_namedtuple_not_callable_issue1295(self):
module = astroid.parse("""
import typing
Named = typing.NamedTuple('Named', [('foo', int), ('bar', int)])
named = Named(1, 2)
""")
call = module.body[-1].value
callables = call.func.infered()
assert len(callables) == 1
assert callables[0].callable()
with self.assertNoMessages():
self.checker.visit_call(call)
@pytest.mark.skipif(sys.version_info < (3, 5), reason='Needs Python 3.5.')
def test_typing_namedtuple_unsubscriptable_object_issue1295(self):
module = astroid.parse("""
import typing
MyType = typing.Tuple[str, str]
""")
subscript = module.body[-1].value
with self.assertNoMessages():
self.checker.visit_subscript(subscript)
| mit | 8,456,009,336,349,886,000 | 35.155844 | 90 | 0.597222 | false |
pgurumur/netconf | core/lib/ip.py | 1 | 12491 | # Copyright (c) 2015 Prabhu Gurumurthy <[email protected]>
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# XXX: Parts of this code, marked with %{ %} are under
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = "$Revision: da23e41c6f10 $"
__author__ = "$Author: pgurumur $"
__modified__ = "$Date: 2013-06-16 08:41:30Z $"
# import everything from netaddr 0.7.x
from netaddr import IPNetwork, IPAddress, AddrFormatError, ZEROFILL
from exception import IPError
# unified IPv4/IPv6 class for handling network and address type for both the
# families
class ip(object):
# Class takes in one argument, this argument can be an IPv4/IPv6 subnet
# or it can be an IPv4/IPv6 address
def __init__(self, IPAddr, **Keywords):
# Main instance of either CIDR/IP class from netaddr
self._ip = None
# Whether given argument is a network or an address
self._isNetwork = False
if IPAddr is not None:
if isinstance(IPAddr, (IPNetwork, ip)):
self._ipAddr(str(IPAddr))
elif isinstance(IPAddr, IPAddress):
self._ip = IPAddr
elif isinstance(IPAddr, (int, long)):
if "version" in Keywords:
ipaddr = None
                    # Takes an integer and converts it into an IP address; if
                    # a prefix is given, makes it a network and instantiates
                    # the same class. Raises exception.IPError if there is a
                    # problem. Works for both IPv4 and IPv6.
if int(Keywords["version"]) == 4:
# For IPv4 addresses usually
octets = []
for _ in xrange(4):
octets.insert(0, str(IPAddr & 0xFF))
IPAddr >>= 8
ipaddr = ".".join(octets)
elif int(Keywords["version"]) == 6:
"""
# For IPv6 addresses usually
hexstr = "%32x" %IPAddr
hextets = []
for ix in range(0, 32, 4):
hextets.append("%x" %int(hexstr[ix:ix + 4], 16))
hextets = self._compress(hextets)
ipaddr = ":".join(hextets)
"""
                        hextets = '0' * 32 + '%x' % IPAddr  # '%x' avoids hex()'s trailing 'L' on longs and works for plain ints too
temp = ""
for ix in xrange(1, 33):
temp = hextets[-ix] + temp
if ix % 4 == 0:
temp = ':' + temp
ipaddr = temp[1:]
else:
raise IPError("unknown IP version")
if "prefix" in Keywords:
if Keywords['prefix']:
ipaddr = "%s/%d" %(ipaddr, Keywords["prefix"])
self._ipAddr(ipaddr)
else:
raise IPError("no version defined!")
elif isinstance(IPAddr, (str, unicode)):
self._ipAddr(str(IPAddr))
else:
raise IPError("unknown object instance: %s" %type(IPAddr))
else:
raise IPError("no ip address/subnet defined!")
def _ipAddr(self, Str):
# Function that forms either IPNetwork or IPaddress instantiation
# based on given string, used for constructor with following objects
# IPNetwork
# IPAddress
# ip
# String
if isinstance(Str, str):
        # Check whether we have a forward slash; if we do, it is most likely
        # a network, but we still verify the prefixlen: if the prefixlen is
        # 32 or 128, the value is automatically converted into IPAddress
        # format
iplen = len(Str.split("/"))
try:
if iplen == 2:
# String with forward slash
self._ip = IPNetwork(Str, implicit_prefix = True,
flags = ZEROFILL)
prefix = self._ip.prefixlen
# if the prefixlen is 32 or 128 it is an IPAddress
if (prefix == 32) or (prefix == 128):
self._ip = IPAddress(Str.split("/")[0], flags = ZEROFILL)
                    # The following if block is necessary: otherwise a /32
                    # IPv6 network would be treated as an address instead
                    # of a subnet
if (prefix == 32) and (self._ip.version != 4):
self._ip = IPNetwork(Str, implicit_prefix = True,
flags = ZEROFILL)
self._isNetwork = True
else:
# Toggle to the network flag
self._isNetwork = True
elif iplen == 1:
self._ip = IPAddress(Str, flags = ZEROFILL)
else:
raise IPError("invalid IPv4/IPv6 address: %s" %Str)
except ValueError as err:
raise IPError(str(err))
except AddrFormatError as err:
raise IPError(str(err))
def _compress(self, hextets):
# %{
# From ipaddr.py
if hextets:
"""
Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index in range(len(hextets)):
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
# %}
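        # Illustrative example (added, not in the original): the hextets
        #   ['2001', 'db8', '0', '0', '0', '0', '0', '1']
        # become ['2001', 'db8', '', '1'], so ':'.join(...) yields
        # '2001:db8::1'.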
return hextets
def subnets(self, SubnetValue = 0):
try:
            SubnetValue = int(SubnetValue)
except ValueError as err:
raise IPError(err)
else:
if self._isNetwork:
try:
for items in list(self._ip.subnet(SubnetValue)):
yield ip(items)
except IPError as err:
raise IPError(err)
def netmask(self, Wildcard = False):
        # Only valid for IPv4 networks; the names are self-explanatory
        retval = None
        if self._isNetwork and (self._ip.version == 4):
            if Wildcard:
                retval = self._ip.hostmask
            else:
                retval = self._ip.netmask
            retval = ip(retval)
        return retval
@property
def broadcast(self):
        # Only valid for IPv4 networks; the names are self-explanatory
retval = None
if self._isNetwork and (self._ip.version == 4):
retval = ip(self._ip.broadcast)
return retval
# Get the size of the network
@property
def size(self):
retval = 1
if self._isNetwork:
retval = self._ip.size
return retval
# Binary values (in bits) of the IPv4/IPv6 address
@property
def binary(self):
retval = None
if not self._isNetwork:
retval = self._ip.bits()
return retval
# used for 'in' operand
def __contains__(self, IPAddr):
retval = False
if self._isNetwork:
temp = None
try:
temp = ip(IPAddr)
except IPError as err:
raise IPError(err)
else:
address = None
if temp._isNetwork:
address = IPNetwork(str(IPAddr), flags = ZEROFILL)
else:
address = IPAddress(str(IPAddr), flags = ZEROFILL)
if address in self._ip:
retval = True
return retval
# for int function
def __int__(self):
retval = None
if self._isNetwork:
retval = self._ip.value
else:
retval = int(self._ip)
return retval
# For list function
def __iter__(self):
if self._isNetwork:
try:
for items in list(self._ip):
yield ip(items)
except IndexError as err:
raise IPError(err)
else:
yield "%s" %self
# for len function
def __len__(self):
retval = 0
if self._isNetwork:
retval = self._ip.prefixlen
else:
if self.version == 4:
retval = 32
elif self.version == 6:
retval = 128
return retval
# for str function
def __str__(self):
return str(self._ip).encode('utf-8')
def __repr__(self):
return repr(str(self))
def __unicode__(self):
return unicode(str(self))
# for hex function
def __hex__(self):
retval = None
if self._isNetwork:
retval = hex(self._ip.value)
else:
retval = hex(self._ip)
return retval
def __oct__(self):
return oct(int(self))
def __add__(self, obj):
if isinstance(obj, str):
temp = str(self._ip) + "%s" %obj
self._ipAddr(temp)
else:
raise IPError("invalid type ('%s') to add" %type(obj))
return self._ip
def __getitem__(self, Key):
retval = None
if isinstance(Key, int):
try:
if self.size > 1:
retval = str(self._ip[int(Key)])
else:
retval = str(self._ip)
except ValueError as err:
raise IPError(err)
except IndexError as err:
raise IPError(err)
else:
raise IPError("cannot get index value for non integer type key")
return retval
def __eq__(self, other):
retval = False
if int(self) == int(other):
retval = True
return retval
@property
def reverse(self):
retval = None
if len(self) == 32:
retval = self._ip.reverse_dns.split(".in-addr")[0]
elif len(self) == 128:
retval = self._ip.reverse_dns.split(".ip6")[0]
return retval
ismulticast = property(fget = lambda self: self._ip.is_multicast())
isreserved = property(fget = lambda self: self._ip.is_reserved())
version = property(fget = lambda self: self._ip.version)
value = property(fget = lambda self: int(self))
length = property(fget = lambda self: len(self))
private = property(fget = lambda self: self._ip.is_private())
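# --- Illustrative usage (a hedged sketch, not part of the original module) ---
if __name__ == "__main__":
    subnet = ip("192.168.1.0/24")
    print(len(subnet))                # prefix length -> 24
    print("192.168.1.10" in subnet)   # membership test -> True
    print(str(subnet.netmask()))      # dotted netmask -> 255.255.255.0
    print(ip("2001:db8::1").reverse)  # reverse-DNS label of the address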
| isc | -6,283,409,211,468,774,000 | 31.698953 | 77 | 0.553519 | false |
osmr/utct | TFLearn/feed_dict_flow_cp.py | 1 | 1917 | from tflearn import data_flow
class FeedDictFlowCp(data_flow.FeedDictFlow):
"""
    Wrapper around TFLearn's FeedDictFlow whose augmenters receive the whole
    batch dict at once (rather than one feed at a time), allowing
    augmentation that needs several inputs together.
"""
def __init__(self,
feed_dict,
coord,
batch_size=128,
num_threads=8,
max_queue=32,
shuffle=False,
continuous=False,
ensure_data_order=False,
dprep_dict=None,
daug_dict=None,
index_array=None):
super(FeedDictFlowCp, self).__init__(feed_dict,
coord,
batch_size,
num_threads,
max_queue,
shuffle,
continuous,
ensure_data_order,
dprep_dict,
daug_dict,
index_array)
def fill_feed_dict_queue(self):
while not self.coord.should_stop() and not self.interrupted:
batch_ids = self.batch_ids_queue.get()
if batch_ids is False:
break
data = self.retrieve_data(batch_ids)
# Apply augmentation according to daug dict
if self.daug_dict:
for k in self.daug_dict:
data = self.daug_dict[k].apply(data)
# Apply preprocessing according to dprep dict
if self.dprep_dict:
for k in self.dprep_dict:
data[k] = self.dprep_dict[k].apply(data[k])
# all prepped, put the data into the queue
self.feed_dict_queue.put(data) | mit | 4,391,301,540,408,282,000 | 36.607843 | 69 | 0.411581 | false |
mveyrenc/mveyrenc.github.io | docker/conf.py | 1 | 9766 | # -*- coding: utf-8 -*-
#
# Docker build configuration file, created by
# sphinx-quickstart on Thu Jul 23 09:30:31 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Docker'
copyright = u'All reproduction prohibited without authorization'
author = u'Madeline Veyrenc'
subtitle = u'Documentation'
reference = u'OWSI-PHP-INT'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'subtitle': subtitle,
'reference': reference
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Docker'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Docker.tex', u'Docker',
u'Madeline Veyrenc', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
latex_appendices = [
"appendices/commandes",
"appendices/dockerfile",
"appendices/glossaire",
"appendices/references",
"appendices/roadmap",
"appendices/versions",
]
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'demo', u'Docker',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Docker', u'Docker',
author, 'Docker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
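# Illustrative note (not from the original config): with the 'docker_doc'
# extlink defined below, reST sources can write
#   :docker_doc:`engine/reference/builder/`
# which expands to https://docs.docker.com/engine/reference/builder/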
extlinks = {'docker_doc': ('https://docs.docker.com/%s', 'Documentation officielle ')} | gpl-2.0 | -5,326,288,314,645,905,000 | 30.711039 | 86 | 0.704792 | false |
HybridF5/jacket | jacket/db/storage/api.py | 1 | 44700 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the storage.db namespace. Call these
functions from storage.db namespace, not the storage.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/storage/storage.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import concurrency as db_concurrency
from oslo_db import options as db_options
from jacket.api.storage import common
from jacket.common.storage import constants
from jacket.storage.i18n import _
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
cfg.StrOpt('backup_name_template',
default='backup-%s',
help='Template string to be used to generate backup names'), ]
CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)
CONF.set_default('sqlite_db', 'jacket.db.storage.sqlite', group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'jacket.db.storage.sqlalchemy.api'}
IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)
# The maximum value a signed INT type may have
MAX_INT = constants.DB_MAX_INT
###################
def dispose_engine():
"""Force the engine to establish new connections."""
# FIXME(jdg): When using sqlite if we do the dispose
# we seem to lose our DB here. Adding this check
# means we don't do the dispose, but we keep our sqlite DB
# This likely isn't the best way to handle this
if 'sqlite' not in IMPL.get_engine().name:
return IMPL.dispose_engine()
else:
return
###################
#def service_destroy(context, service_id):
# """Destroy the service or raise if it does not exist."""
# return IMPL.service_destroy(context, service_id)
#def service_get(context, service_id):
# """Get a service or raise if it does not exist."""
# return IMPL.service_get(context, service_id)
#def service_get_by_host_and_topic(context, host, topic):
# """Get a service by host it's on and topic it listens to."""
# return IMPL.service_get_by_host_and_topic(context, host, topic)
#
#
#def service_get_all(context, filters=None):
# """Get all services."""
# return IMPL.service_get_all(context, filters)
#def service_get_all_by_topic(context, topic, disabled=None):
# """Get all services for a given topic."""
# return IMPL.service_get_all_by_topic(context, topic, disabled=disabled)
#def service_get_all_by_binary(context, binary, disabled=None):
# """Get all services for a given binary."""
# return IMPL.service_get_all_by_binary(context, binary, disabled)
#def service_get_by_args(context, host, binary):
# """Get the state of a service by node name and binary."""
# return IMPL.service_get_by_args(context, host, binary)
#def service_create(context, values):
# """Create a service from the values dictionary."""
# return IMPL.service_create(context, values)
#def service_update(context, service_id, values):
# """Set the given properties on an service and update it.
# Raises NotFound if service does not exist.
# """
# return IMPL.service_update(context, service_id, values)
###############
def volume_attach(context, values):
"""Attach a volume."""
return IMPL.volume_attach(context, values)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
attach_mode='rw'):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, host_name,
mountpoint, attach_mode)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_host(context, host, count_only=False):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_host(context,
host,
count_only)
def volume_data_get_for_project(context, project_id):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id, attachment_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id, attachment_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None, offset=None):
"""Get all volumes."""
return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys,
sort_dirs=sort_dirs, filters=filters,
offset=offset)
def volume_get_all_by_host(context, host, filters=None):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host, filters=filters)
def volume_get_all_by_group(context, group_id, filters=None):
"""Get all volumes belonging to a consistency group."""
return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
def volume_update(context, volume_id, values):
"""Set the given properties on a volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
def volume_attachment_update(context, attachment_id, values):
return IMPL.volume_attachment_update(context, attachment_id, values)
def volume_attachment_get(context, attachment_id, session=None):
return IMPL.volume_attachment_get(context, attachment_id, session)
def volume_attachment_get_used_by_volume_id(context, volume_id):
return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id)
def volume_attachment_get_by_host(context, volume_id, host):
return IMPL.volume_attachment_get_by_host(context, volume_id, host)
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id,
instance_uuid)
def volume_update_status_based_on_attachment(context, volume_id):
"""Update volume status according to attached instance id"""
return IMPL.volume_update_status_based_on_attachment(context, volume_id)
def volume_has_snapshots_filter():
return IMPL.volume_has_snapshots_filter()
def volume_has_undeletable_snapshots_filter():
return IMPL.volume_has_undeletable_snapshots_filter()
def volume_has_attachments_filter():
return IMPL.volume_has_attachments_filter()
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys,
sort_dirs, offset)
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id, filters,
marker, limit, sort_keys,
sort_dirs, offset)
def snapshot_get_by_host(context, host, filters=None):
"""Get all snapshots belonging to a host.
    :param host: Include snapshots only for the specified host.
:param filters: Filters for the query in the form of key/value.
"""
return IMPL.snapshot_get_by_host(context, host, filters)
def snapshot_get_all_for_cgsnapshot(context, project_id):
"""Get all snapshots belonging to a cgsnapshot."""
return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
"""Set the given properties on an snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
"""Get count and gigabytes used for snapshots for specified project."""
return IMPL.snapshot_data_get_for_project(context,
project_id,
volume_type_id)
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the snapshots inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.snapshot_get_active_by_window(context, begin, end, project_id)
####################
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id)
def snapshot_metadata_delete(context, snapshot_id, key):
"""Delete the given metadata item."""
return IMPL.snapshot_metadata_delete(context, snapshot_id, key)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.snapshot_metadata_update(context, snapshot_id,
metadata, delete)
####################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key,
meta_type=common.METADATA_TYPES.user):
"""Delete the given metadata item."""
return IMPL.volume_metadata_delete(context, volume_id,
key, meta_type)
def volume_metadata_update(context, volume_id, metadata,
delete, meta_type=common.METADATA_TYPES.user):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_metadata_update(context, volume_id, metadata,
delete, meta_type)
##################
def volume_admin_metadata_get(context, volume_id):
"""Get all administration metadata for a volume."""
return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete,
add=True, update=True):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
delete, add, update)
##################
def volume_type_create(context, values, projects=None):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values, projects)
def volume_type_update(context, volume_type_id, values):
return IMPL.volume_type_update(context, volume_type_id, values)
def volume_type_get_all(context, inactive=False, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Get all volume types.
:param context: context to query under
:param inactive: Include inactive volume types to the result set
:param filters: Filters for the query in the form of key/value.
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param list_result: For compatibility, if list_result = True, return a list
instead of dict.
:is_public: Filter volume types based on visibility:
* **True**: List public volume types only
* **False**: List private volume types only
* **None**: List both public and private volume types
:returns: list/dict of matching volume types
"""
return IMPL.volume_type_get_all(context, inactive, filters, marker=marker,
limit=limit, sort_keys=sort_keys,
sort_dirs=sort_dirs, offset=offset,
list_result=list_result)
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Get volume type by id.
:param context: context to query under
:param id: Volume type id to get.
:param inactive: Consider inactive volume types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: volume type
"""
return IMPL.volume_type_get(context, id, inactive, expected_fields)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Get volume types by name or id."""
return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
"""Get volume types that are associated with specific qos specs."""
return IMPL.volume_type_qos_associations_get(context,
qos_specs_id,
inactive)
def volume_type_qos_associate(context, type_id, qos_specs_id):
"""Associate a volume type with specific qos specs."""
return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate a volume type from specific qos specs."""
return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types from specific qos specs."""
return IMPL.volume_type_qos_disassociate_all(context,
qos_specs_id)
def volume_type_qos_specs_get(context, type_id):
"""Get all qos specs for given volume type."""
return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, id):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, id)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
def volume_type_access_get_all(context, type_id):
"""Get all volume type access of a volume type."""
return IMPL.volume_type_access_get_all(context, type_id)
def volume_type_access_add(context, type_id, project_id):
"""Add volume type access for project."""
return IMPL.volume_type_access_add(context, type_id, project_id)
def volume_type_access_remove(context, type_id, project_id):
"""Remove volume type access for project."""
return IMPL.volume_type_access_remove(context, type_id, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
"""Create or update volume type extra specs.
This adds or modifies the key/value pairs specified in the extra specs dict
argument.
"""
return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
###################
def volume_type_encryption_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_get(context, volume_type_id, session)
def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_create(context, volume_type_id,
encryption_specs)
def volume_type_encryption_update(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_update(context, volume_type_id,
encryption_specs)
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_volume_get(context, volume_type_id,
session)
def volume_encryption_metadata_get(context, volume_id, session=None):
return IMPL.volume_encryption_metadata_get(context, volume_id, session)
###################
def qos_specs_create(context, values):
"""Create a qos_specs."""
return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get(context, qos_specs_id)
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all qos_specs."""
return IMPL.qos_specs_get_all(context, filters=filters, marker=marker,
limit=limit, offset=offset,
sort_keys=sort_keys, sort_dirs=sort_dirs)
def qos_specs_get_by_name(context, name):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get_by_name(context, name)
def qos_specs_associations_get(context, qos_specs_id):
"""Get all associated volume types for a given qos_specs."""
return IMPL.qos_specs_associations_get(context, qos_specs_id)
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate qos_specs from volume type."""
return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate qos_specs from volume type."""
return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate qos_specs from all entities."""
return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
def qos_specs_delete(context, qos_specs_id):
"""Delete the qos_specs."""
return IMPL.qos_specs_delete(context, qos_specs_id)
def qos_specs_item_delete(context, qos_specs_id, key):
"""Delete specified key in the qos_specs."""
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, specs):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
return IMPL.qos_specs_update(context, qos_specs_id, specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
return IMPL.volume_glance_metadata_create(context,
volume_id,
key,
value)
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
"""Add Glance metadata for specified volume (multiple pairs)."""
return IMPL.volume_glance_metadata_bulk_create(context, volume_id,
metadata)
def volume_glance_metadata_get_all(context):
"""Return the glance metadata for all volumes."""
return IMPL.volume_glance_metadata_get_all(context)
def volume_glance_metadata_get(context, volume_id):
"""Return the glance metadata for a volume."""
return IMPL.volume_glance_metadata_get(context, volume_id)
def volume_glance_metadata_list_get(context, volume_id_list):
"""Return the glance metadata for a volume list."""
return IMPL.volume_glance_metadata_list_get(context, volume_id_list)
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This will copy all of the key:value pairs from the originating volume,
to ensure that a volume created from the snapshot will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
volume_id)
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot).
This will copy all of the key:value pairs from the originating snapshot,
to ensure that the Glance metadata from the original volume is retained.
"""
return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
snapshot_id)
def volume_glance_metadata_delete_by_volume(context, volume_id):
"""Delete the glance metadata for a volume."""
return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
"""Delete the glance metadata for a snapshot."""
return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
Update the Glance metadata for a volume by copying all of the key:value
pairs from the originating volume.
This is so that a volume created from the volume (clone) will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
context,
src_volume_id,
volume_id)
###################
def quota_create(context, project_id, resource, limit, allocated=0):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit,
allocated=allocated)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_allocated_get_all_by_project(context, project_id):
"""Retrieve all allocated quotas associated with a given project."""
return IMPL.quota_allocated_get_all_by_project(context, project_id)
def quota_allocated_update(context, project_id,
resource, allocated):
"""Update allocated quota to subprojects or raise if it does not exist.
:raises: storage.exception.ProjectQuotaNotFound
"""
return IMPL.quota_allocated_update(context, project_id,
resource, allocated)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_update_resource(context, old_res, new_res):
"""Update resource of quotas."""
return IMPL.quota_update_resource(context, old_res, new_res)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_update_resource(context, resource, new_resource):
"""Update resource name in quota_class."""
return IMPL.quota_class_update_resource(context, resource, new_resource)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None,
is_allocated_reserve=False):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=project_id,
is_allocated_reserve=is_allocated_reserve)
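# Illustrative flow (a hedged sketch, not from the original): callers
# typically reserve first, then commit on success or roll back on failure
# using the functions below:
#   reservations = quota_reserve(ctxt, resources, quotas, deltas,
#                                expire, until_refresh, max_age)
#   try:
#       ...  # do the quota-consuming work
#       reservation_commit(ctxt, reservations)
#   except Exception:
#       reservation_rollback(ctxt, reservations)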
def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id)
def quota_destroy_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
def quota_usage_update_resource(context, old_res, new_res):
"""Update resource field in quota_usages."""
return IMPL.quota_usage_update_resource(context, old_res, new_res)
###################
def backup_get(context, backup_id, read_deleted=None, project_only=True):
"""Get a backup or raise if it does not exist."""
return IMPL.backup_get(context, backup_id, read_deleted, project_only)
def backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all backups."""
return IMPL.backup_get_all(context, filters=filters, marker=marker,
limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def backup_get_all_by_host(context, host):
"""Get all backups belonging to a host."""
return IMPL.backup_get_all_by_host(context, host)
def backup_create(context, values):
"""Create a backup from the values dictionary."""
return IMPL.backup_create(context, values)
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):
"""Get all backups belonging to a project."""
return IMPL.backup_get_all_by_project(context, project_id,
filters=filters, marker=marker,
limit=limit, offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
def backup_get_all_by_volume(context, volume_id, filters=None):
"""Get all backups belonging to a volume."""
return IMPL.backup_get_all_by_volume(context, volume_id,
filters=filters)
def backup_update(context, backup_id, values):
"""Set the given properties on a backup and update it.
Raises NotFound if backup does not exist.
"""
return IMPL.backup_update(context, backup_id, values)
def backup_destroy(context, backup_id):
"""Destroy the backup or raise if it does not exist."""
return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
"""Get a volume transfer record or raise if it does not exist."""
return IMPL.transfer_get(context, transfer_id)
def transfer_get_all(context):
"""Get all volume transfer records."""
return IMPL.transfer_get_all(context)
def transfer_get_all_by_project(context, project_id):
"""Get all volume transfer records for specified project."""
return IMPL.transfer_get_all_by_project(context, project_id)
def transfer_create(context, values):
"""Create an entry in the transfers table."""
return IMPL.transfer_create(context, values)
def transfer_destroy(context, transfer_id):
"""Destroy a record in the volume transfer table."""
return IMPL.transfer_destroy(context, transfer_id)
def transfer_accept(context, transfer_id, user_id, project_id):
"""Accept a volume transfer."""
return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
###################
def consistencygroup_get(context, consistencygroup_id):
"""Get a consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_get(context, consistencygroup_id)
def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Get all consistencygroups."""
return IMPL.consistencygroup_get_all(context, filters=filters,
marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
def consistencygroup_create(context, values):
"""Create a consistencygroup from the values dictionary."""
return IMPL.consistencygroup_create(context, values)
def consistencygroup_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Get all consistencygroups belonging to a project."""
return IMPL.consistencygroup_get_all_by_project(context, project_id,
filters=filters,
marker=marker, limit=limit,
offset=offset,
sort_keys=sort_keys,
sort_dirs=sort_dirs)
def consistencygroup_update(context, consistencygroup_id, values):
"""Set the given properties on a consistencygroup and update it.
Raises NotFound if consistencygroup does not exist.
"""
return IMPL.consistencygroup_update(context, consistencygroup_id, values)
def consistencygroup_destroy(context, consistencygroup_id):
"""Destroy the consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_destroy(context, consistencygroup_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
"""Get a cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_get(context, cgsnapshot_id)
def cgsnapshot_get_all(context, filters=None):
"""Get all cgsnapshots."""
return IMPL.cgsnapshot_get_all(context, filters)
def cgsnapshot_create(context, values):
"""Create a cgsnapshot from the values dictionary."""
return IMPL.cgsnapshot_create(context, values)
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
"""Get all cgsnapshots belonging to a consistency group."""
return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters)
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
"""Get all cgsnapshots belonging to a project."""
return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters)
def cgsnapshot_update(context, cgsnapshot_id, values):
"""Set the given properties on a cgsnapshot and update it.
Raises NotFound if cgsnapshot does not exist.
"""
return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
def cgsnapshot_destroy(context, cgsnapshot_id):
"""Destroy the cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than given age from storage tables
Raises InvalidParameterValue if age_in_days is incorrect.
:returns: number of deleted rows
"""
return IMPL.purge_deleted_rows(context, age_in_days=age_in_days)
def get_booleans_for_table(table_name):
return IMPL.get_booleans_for_table(table_name)
###################
def driver_initiator_data_update(context, initiator, namespace, updates):
"""Create DriverPrivateData from the values dictionary."""
return IMPL.driver_initiator_data_update(context, initiator,
namespace, updates)
def driver_initiator_data_get(context, initiator, namespace):
"""Query for an DriverPrivateData that has the specified key"""
return IMPL.driver_initiator_data_get(context, initiator, namespace)
###################
def image_volume_cache_create(context, host, image_id, image_updated_at,
volume_id, size):
"""Create a new image volume cache entry."""
return IMPL.image_volume_cache_create(context,
host,
image_id,
image_updated_at,
volume_id,
size)
def image_volume_cache_delete(context, volume_id):
"""Delete an image volume cache entry specified by volume id."""
return IMPL.image_volume_cache_delete(context, volume_id)
def image_volume_cache_get_and_update_last_used(context, image_id, host):
"""Query for an image volume cache entry."""
return IMPL.image_volume_cache_get_and_update_last_used(context,
image_id,
host)
def image_volume_cache_get_by_volume_id(context, volume_id):
"""Query to see if a volume id is an image-volume contained in the cache"""
return IMPL.image_volume_cache_get_by_volume_id(context, volume_id)
def image_volume_cache_get_all_for_host(context, host):
"""Query for all image volume cache entry for a host."""
return IMPL.image_volume_cache_get_all_for_host(context, host)
###################
def get_model_for_versioned_object(versioned_object):
return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
return IMPL.get_by_id(context, model, id, *args, **kwargs)
class Condition(object):
"""Class for normal condition values for conditional_update."""
def __init__(self, value, field=None):
self.value = value
# Field is optional and can be passed when getting the filter
self.field = field
def get_filter(self, model, field=None):
return IMPL.condition_db_filter(model, self._get_field(field),
self.value)
def _get_field(self, field=None):
# We must have a defined field on initialization or when called
field = field or self.field
if not field:
raise ValueError(_('Condition has no field.'))
return field
class Not(Condition):
"""Class for negated condition values for conditional_update.
    By default NULL values are treated the way Python treats None rather
    than the way SQL treats NULL.
    For example, when the excluded values are (1, 2) the condition evaluates
    to True for value 3 or NULL, whereas SQL would match only 3.
"""
def __init__(self, value, field=None, auto_none=True):
super(Not, self).__init__(value, field)
self.auto_none = auto_none
def get_filter(self, model, field=None):
# If implementation has a specific method use it
if hasattr(IMPL, 'condition_not_db_filter'):
return IMPL.condition_not_db_filter(model, self._get_field(field),
self.value, self.auto_none)
        # Otherwise the non-negated object must admit the ~ operator for not
return ~super(Not, self).get_filter(model, field)
class Case(object):
"""Class for conditional value selection for conditional_update."""
def __init__(self, whens, value=None, else_=None):
self.whens = whens
self.value = value
self.else_ = else_
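# Illustrative usage of Condition/Not/Case (a hedged sketch; the
# conditional_update() docstring below holds the authoritative examples):
#   expected = {'status': Not('deleting'),
#               'attach_status': Condition('detached')}
#   conditional_update(context, model, {'status': 'deleting'}, expected)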
def is_orm_value(obj):
"""Check if object is an ORM field."""
return IMPL.is_orm_value(obj)
def conditional_update(context, model, values, expected_values, filters=(),
include_deleted='no', project_only=False):
"""Compare-and-swap conditional update.
Update will only occur in the DB if conditions are met.
We have 4 different condition types we can use in expected_values:
- Equality: {'status': 'available'}
- Inequality: {'status': vol_obj.Not('deleting')}
    - In range: {'status': ['available', 'error']}
    - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}
Method accepts additional filters, which are basically anything that
can be passed to a sqlalchemy query's filter method, for example:
[~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]
We can select values based on conditions using Case objects in the
'values' argument. For example:
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = db.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
db.conditional_update(context, models.Volume, {'status': case_values},
{'status': 'available'})
And we can use DB fields for example to store previous status in the
corresponding field even though we don't know which value is in the db
from those we allowed:
db.conditional_update(context, models.Volume,
{'status': 'deleting',
'previous_status': models.Volume.status},
{'status': ('available', 'error')})
WARNING: SQLAlchemy does not allow selecting order of SET clauses, so
for now we cannot do things like
{'previous_status': model.status, 'status': 'retyping'}
because it will result in both previous_status and status being set to
'retyping'. Issue has been reported [1] and a patch to fix it [2] has
been submitted.
[1]: https://bitbucket.org/zzzeek/sqlalchemy/issues/3541/
[2]: https://github.com/zzzeek/sqlalchemy/pull/200
:param values: Dictionary of key-values to update in the DB.
:param expected_values: Dictionary of conditions that must be met
for the update to be executed.
:param filters: Iterable with additional filters
:param include_deleted: Should the update include deleted items, this
is equivalent to read_deleted
:param project_only: Should the query be limited to context's project.
:returns number of db rows that were updated
"""
return IMPL.conditional_update(context, model, values, expected_values,
filters, include_deleted, project_only)
| apache-2.0 | 5,485,486,144,737,427,000 | 35.135812 | 79 | 0.643982 | false |
HelloLily/hellolily | lily/search/scan_search.py | 1 | 1365 | import inspect
from django.conf import settings
from elasticutils.contrib.django import MappingType
from lily.search.base_mapping import BaseMapping
class ModelMappings(object):
mappings = []
model_to_mappings = {}
app_to_mappings = {}
@classmethod
def scan(cls, apps_to_scan=settings.INSTALLED_APPS):
for app in apps_to_scan:
# Try because not every app has a search.py.
try:
# Import the child module 'search', hence the additional
# parameters. (Otherwise only the top module is returned).
search_module = __import__('%s.search' % app, globals(), locals(), ['search'])
for name_member in inspect.getmembers(search_module, inspect.isclass):
member = name_member[1]
# Check if we defined a mapping class. We shall exclude
# members of BaseMapping or MappingType itself.
if issubclass(member, MappingType) and member is not BaseMapping and member is not MappingType:
cls.mappings.append(member)
cls.model_to_mappings[member.get_model()] = member
cls.app_to_mappings[app] = member
except Exception:
pass
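    # Hedged usage sketch (added; ``SomeModel`` is an assumed name): scan() is
    # typically run once at startup, after which the class-level lookup dicts
    # can be queried directly:
    #
    #   ModelMappings.scan()
    #   mapping = ModelMappings.model_to_mappings.get(SomeModel)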
| agpl-3.0 | -6,483,834,906,151,082,000 | 38 | 115 | 0.578755 | false |
jobovy/galpy | galpy/potential/interpSphericalPotential.py | 1 | 4568 | ###############################################################################
# interpSphericalPotential.py: build spherical potential through interpolation
###############################################################################
import numpy
from scipy import interpolate
from .SphericalPotential import SphericalPotential
from .Potential import _evaluateRforces, _evaluatePotentials
from ..util.conversion import physical_compatible, get_physical
class interpSphericalPotential(SphericalPotential):
"""__init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,ro=None,vo=None)
Class that interpolates a spherical potential on a grid"""
def __init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,
ro=None,vo=None):
"""__init__(self,rforce=None,rgrid=numpy.geomspace(0.01,20,101),Phi0=None,ro=None,vo=None)
NAME:
__init__
PURPOSE:
initialize an interpolated, spherical potential
INPUT:
rforce= (None) Either a) function that gives the radial force as a function of r or b) a galpy Potential instance or list thereof
rgrid= (numpy.geomspace(0.01,20,101)) radial grid on which to evaluate the potential for interpolation (note that beyond rgrid[-1], the potential is extrapolated as -GM(<rgrid[-1])/r)
Phi0= (0.) value of the potential at rgrid[0] (only necessary when rforce is a function, for galpy potentials automatically determined)
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2020-07-13 - Written - Bovy (UofT)
"""
SphericalPotential.__init__(self,amp=1.,ro=ro,vo=vo)
self._rgrid= rgrid
# Determine whether rforce is a galpy Potential or list thereof
try:
_evaluateRforces(rforce,1.,0.)
except:
_rforce= rforce
Phi0= 0. if Phi0 is None else Phi0
else:
_rforce= lambda r: _evaluateRforces(rforce,r,0.)
# Determine Phi0
Phi0= _evaluatePotentials(rforce,rgrid[0],0.)
# Also check that unit systems are compatible
if not physical_compatible(self,rforce):
raise RuntimeError('Unit conversion factors ro and vo incompatible between Potential to be interpolated and the factors given to interpSphericalPotential')
# If set for the parent, set for the interpolated
phys= get_physical(rforce,include_set=True)
if phys['roSet']:
self.turn_physical_on(ro=phys['ro'])
if phys['voSet']:
self.turn_physical_on(vo=phys['vo'])
self._rforce_grid= numpy.array([_rforce(r) for r in rgrid])
self._force_spline= interpolate.InterpolatedUnivariateSpline(
self._rgrid,self._rforce_grid,k=3,ext=0)
# Get potential and r2deriv as splines for the integral and derivative
self._pot_spline= self._force_spline.antiderivative()
self._Phi0= Phi0+self._pot_spline(self._rgrid[0])
self._r2deriv_spline= self._force_spline.derivative()
# Extrapolate as mass within rgrid[-1]
self._rmin= rgrid[0]
self._rmax= rgrid[-1]
self._total_mass= -self._rmax**2.*self._force_spline(self._rmax)
self._Phimax= -self._pot_spline(self._rmax)+self._Phi0\
+self._total_mass/self._rmax
self.hasC= True
self.hasC_dxdv= True
self.hasC_dens= True
return None
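    # Illustrative sketch (added; HernquistPotential is just one example of a
    # potential that could be interpolated):
    #
    #   from galpy.potential import HernquistPotential
    #   hp= HernquistPotential(amp=1.,a=2.)
    #   ip= interpSphericalPotential(rforce=hp,
    #                                rgrid=numpy.geomspace(0.01,20,101))
    #
    # ip then evaluates like any other spherical galpy potential.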
def _revaluate(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -self._total_mass/r[r >= self._rmax]+self._Phimax
out[r < self._rmax]= -self._pot_spline(r[r < self._rmax])+self._Phi0
return out
def _rforce(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -self._total_mass/r[r >= self._rmax]**2.
out[r < self._rmax]= self._force_spline(r[r < self._rmax])
return out
def _r2deriv(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= -2.*self._total_mass/r[r >= self._rmax]**3.
out[r < self._rmax]= -self._r2deriv_spline(r[r < self._rmax])
return out
def _rdens(self,r,t=0.):
out= numpy.empty_like(r)
out[r >= self._rmax]= 0.
# Fall back onto Poisson eqn., implemented in SphericalPotential
out[r < self._rmax]= SphericalPotential._rdens(self,r[r < self._rmax])
return out
| bsd-3-clause | -5,407,083,161,345,688,000 | 42.09434 | 194 | 0.601576 | false |
OpenTechFund/WebApp | opentech/apply/funds/migrations/0045_new_workflow.py | 1 | 1533 | # Generated by Django 2.0.8 on 2018-10-24 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funds', '0044_add_named_blocks'),
]
operations = [
migrations.AlterField(
model_name='applicationbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='applicationsubmission',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='labbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
migrations.AlterField(
model_name='roundbase',
name='workflow_name',
field=models.CharField(choices=[('single', 'Request'), ('single_ext', 'Request with external review'), ('double', 'Concept & Proposal')], default='single', max_length=100, verbose_name='Workflow'),
),
]
| gpl-2.0 | 7,132,013,135,087,195,000 | 45.454545 | 209 | 0.609263 | false |
ActiveState/code | recipes/Python/496993_Separating_Pattern_ImplementatiYour/recipe-496993.py | 1 | 3324 | ##########
# pattern_impl.py
##########
from installmethod import installmethod # the installmethod from recipe: 223613
class ObserverPattern:
"""
A reusable implementation of the Observer pattern.
"""
theSubject = None
observers = {}
class Subject:
def __init__(self):
self.observers = []
def attach(self, observer):
self.observers.append(observer)
def detach(self, observer):
self.observers.remove(observer)
def notify(self):
for observer in self.observers:
observer.update(self)
def decoration(self):
self.decorated_trigger()
self.notify()
class Observer:
def __init__(self, subject):
subject.attach(self)
        def update(self, subject):
            currentState = subject.get_current_state()
self.react_to_observation(currentState)
def specify_subject(self, subject):
self.theSubject = subject
self.make_generalization(subject, self.Subject)
def add_observer(self, observer):
self.observers[observer.__name__] = observer
self.make_generalization(observer, self.Observer)
def make_generalization(self, childClass, parentClass):
bases = list(childClass.__bases__)
bases.append(parentClass)
childClass.__bases__ = tuple(bases)
def make_observation(self, changeObservation, changeReaction):
func = getattr(self.theSubject, changeObservation)
installmethod(func, self.theSubject, "get_current_state")
for observer in self.observers.keys():
func = getattr(self.observers[observer], changeReaction)
installmethod(func, self.observers[observer], "react_to_observation")
def add_trigger(self, trigger):
func = getattr(self.theSubject, trigger)
installmethod(func, self.theSubject, "decorated_trigger")
func = getattr(self.theSubject, "decoration")
installmethod(func, self.theSubject, trigger)
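    # Added commentary: add_trigger saves the subject's original trigger
    # method under the name decorated_trigger and installs decoration() in its
    # place, so a call such as tick() now runs the original body and then
    # notify() on every attached observer.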
##########
# example.py
##########
class ClockTimer:
def get_time(self):
# get current state of the subject
return self.currentTime
def tick(self):
# update internal time-keeping state
import time
self.currentTime = time.ctime()
class DigitalClock:
def draw(self, currentTime):
# display currentTime as a digital clock
print "DigitalClock: current time is", currentTime
class AnalogClock:
def draw(self, currentTime):
# display currentTime as an analog clock
print "AnalogClock: current time is", currentTime
if __name__ == '__main__':
from pattern_impl import ObserverPattern
observerPattern = ObserverPattern()
observerPattern.specify_subject(ClockTimer)
observerPattern.add_observer(DigitalClock)
observerPattern.add_observer(AnalogClock)
observerPattern.make_observation("get_time", "draw")
observerPattern.add_trigger("tick")
aTimer = ClockTimer()
dClock = DigitalClock(aTimer)
aClock = AnalogClock(aTimer)
import time
for i in range(10):
print "\nTick!"
aTimer.tick()
time.sleep(1)
| mit | -542,397,210,227,763,600 | 28.945946 | 81 | 0.619134 | false |
bitmovin/bitmovin-python | tests/bitmovin/services/encodings/drms/playready_drm_tests.py | 1 | 16848 | import unittest
import uuid
import json
from bitmovin import Bitmovin, Response, Stream, StreamInput, EncodingOutput, ACLEntry, Encoding, \
FMP4Muxing, MuxingStream, PlayReadyDRM, SelectionMode, ACLPermission, PlayReadyDRMAdditionalInformation
from bitmovin.errors import BitmovinApiError, InvalidTypeError
from tests.bitmovin import BitmovinTestCase
from bitmovin.resources.enums import PlayReadyMethod
class PlayReadyDRMTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
self.sampleEncoding = self._create_sample_encoding() # type: Encoding
def tearDown(self):
super().tearDown()
def test_create_drm(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_create_drm_with_additional_information(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_with_additional_information()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self.assertIsNotNone(drm_resource.additionalInformation)
self._compare_drms(sample_drm, drm_resource)
def test_create_playready_piff(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_piff()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_create_playready_key(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready_key()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_assign_unsuitable_playready_method(self):
sample_drm = self._get_sample_drm_playready_piff()
with self.assertRaises(InvalidTypeError):
sample_drm.method = ACLPermission.PRIVATE
def test_create_drm_without_name(self):
fmp4_muxing = self._create_muxing() # type: FMP4Muxing
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.name = None
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
def test_retrieve_drm(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
retrieved_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve(
encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id, drm_id=drm_resource.id)
self.assertIsNotNone(retrieved_drm_response)
self.assertIsNotNone(retrieved_drm_response.resource)
self._compare_drms(retrieved_drm_response.resource, created_drm_response.resource)
def test_delete_drm(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
deleted_minimal_resource = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.delete(
muxing_id=fmp4_muxing.id, encoding_id=self.sampleEncoding.id, drm_id=drm_resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve(encoding_id=self.sampleEncoding.id,
muxing_id=fmp4_muxing.id, drm_id=drm_resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving muxing after deleting it should not be possible.'
)
except BitmovinApiError:
pass
def test_list_drms(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
drms = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.list(encoding_id=self.sampleEncoding.id,
muxing_id=fmp4_muxing.id)
self.assertIsNotNone(drms)
self.assertIsNotNone(drms.resource)
self.assertIsNotNone(drms.response)
self.assertIsInstance(drms.resource, list)
self.assertIsInstance(drms.response, Response)
self.assertGreater(drms.resource.__sizeof__(), 1)
    def test_retrieve_drm_custom_data(self):
fmp4_muxing = self._create_muxing()
self.assertIsNotNone(fmp4_muxing.id)
sample_drm = self._get_sample_drm_playready()
sample_drm.outputs = fmp4_muxing.outputs
sample_drm.customData = 'my_fancy_awesome_custom_data'
created_drm_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=fmp4_muxing.id)
self.assertIsNotNone(created_drm_response)
self.assertIsNotNone(created_drm_response.resource)
self.assertIsNotNone(created_drm_response.resource.id)
drm_resource = created_drm_response.resource # type: PlayReadyDRM
self._compare_drms(sample_drm, drm_resource)
custom_data_response = self.bitmovin.encodings.Muxing.FMP4.DRM.PlayReady.retrieve_custom_data(
muxing_id=fmp4_muxing.id,
encoding_id=self.sampleEncoding.id,
drm_id=drm_resource.id
)
custom_data = custom_data_response.resource
self.assertEqual(sample_drm.customData, json.loads(custom_data.customData))
def _create_muxing(self):
sample_muxing = self._get_sample_muxing()
created_muxing_response = self.bitmovin.encodings.Muxing.FMP4.create(object_=sample_muxing,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_muxing_response)
self.assertIsNotNone(created_muxing_response.resource)
self.assertIsNotNone(created_muxing_response.resource.id)
self._compare_muxings(sample_muxing, created_muxing_response.resource)
return created_muxing_response.resource
def _compare_drms(self, first: PlayReadyDRM, second: PlayReadyDRM):
"""
        :param first: PlayReadyDRM
        :param second: PlayReadyDRM
:return: bool
"""
self.assertEqual(first.kid, second.kid)
self.assertEqual(first.keySeed, second.keySeed)
self.assertEqual(first.key, second.key)
self.assertEqual(first.method, second.method)
self.assertEqual(first.laUrl, second.laUrl)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
if first.additionalInformation is None and second.additionalInformation is None:
return True
self.assertEqual(first.additionalInformation.wrmHeaderCustomAttributes,
second.additionalInformation.wrmHeaderCustomAttributes)
return True
def _compare_muxings(self, first: FMP4Muxing, second: FMP4Muxing):
"""
        :param first: FMP4Muxing
        :param second: FMP4Muxing
:return: bool
"""
self.assertEqual(first.segmentLength, second.segmentLength)
self.assertEqual(first.segmentNaming, second.segmentNaming)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _get_sample_drm_playready(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready DRM')
return drm
def _get_sample_drm_playready_with_additional_information(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
additional_information=PlayReadyDRMAdditionalInformation(
wrm_header_custom_attributes="<custom><tag1>text</tag1></custom>"),
name='Sample Playready DRM')
return drm
def _get_sample_drm_playready_piff(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key_seed=playready_drm_settings[0].get('keySeed'),
kid=playready_drm_settings[0].get('kid'),
method=PlayReadyMethod.PIFF_CTR,
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready PIFF DRM')
return drm
def _get_sample_drm_playready_key(self):
playready_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('PlayReady')
drm = PlayReadyDRM(key=playready_drm_settings[0].get('key'),
kid=playready_drm_settings[0].get('kid'),
method=playready_drm_settings[0].get('method'),
la_url=playready_drm_settings[0].get('laUrl'),
name='Sample Playready DRM')
return drm
def _get_sample_muxing(self):
stream = self._get_sample_stream()
create_stream_response = self.bitmovin.encodings.Stream.create(object_=stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(create_stream_response)
self.assertIsNotNone(create_stream_response.resource)
self.assertIsNotNone(create_stream_response.resource.id)
muxing_stream = MuxingStream(stream_id=create_stream_response.resource.id)
muxing = FMP4Muxing(streams=[muxing_stream], segment_length=4, segment_naming='seg_%number%.ts',
outputs=stream.outputs, name='Sample FMP4 Muxing')
return muxing
def _get_sample_stream(self):
sample_codec_configuration = self.utils.get_sample_h264_codec_configuration()
h264_codec_configuration = self.bitmovin.codecConfigurations.H264.create(sample_codec_configuration)
(sample_input, sample_files) = self.utils.get_sample_s3_input()
s3_input = self.bitmovin.inputs.S3.create(sample_input)
stream_input = StreamInput(input_id=s3_input.resource.id,
input_path=sample_files.get('854b9c98-17b9-49ed-b75c-3b912730bfd1'),
selection_mode=SelectionMode.AUTO)
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/' + str(uuid.uuid4()),
acl=[acl_entry])
stream = Stream(codec_configuration_id=h264_codec_configuration.resource.id,
input_streams=[stream_input],
outputs=[encoding_output],
name='Sample Stream')
self.assertIsNotNone(stream.codecConfigId)
self.assertIsNotNone(stream.inputStreams)
self.assertIsNotNone(stream.outputs)
return stream
def _create_sample_encoding(self):
sample_encoding = self.utils.get_sample_encoding()
resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
return resource_response.resource
if __name__ == '__main__':
unittest.main()
| unlicense | -688,011,659,537,507,000 | 45.541436 | 120 | 0.657882 | false |
emory-libraries/eulfedora | eulfedora/server.py | 1 | 19607 | # file eulfedora/server.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:class:`eulfedora.server.Repository` has the capability to
automatically use connection configuration parameters pulled from
Django settings, when available, but it can also be used without Django.
When you create an instance of :class:`~eulfedora.server.Repository`,
if you do not specify connection parameters, it will attempt to
initialize the repository connection based on Django settings, using
the configuration names documented below.
If you are writing unit tests that use :mod:`eulfedora`, you may want
to take advantage of
:class:`eulfedora.testutil.FedoraTestSuiteRunner`, which has logic to
set up and switch configurations between a development fedora
repository and a test repository.
Projects that use this module should include the following settings in their
``settings.py``::
# Fedora Repository settings
FEDORA_ROOT = 'http://fedora.host.name:8080/fedora/'
FEDORA_USER = 'user'
FEDORA_PASSWORD = 'password'
FEDORA_PIDSPACE = 'changeme'
FEDORA_TEST_ROOT = 'http://fedora.host.name:8180/fedora/'
FEDORA_TEST_PIDSPACE = 'testme'
# optional retry setting (default is 3)
FEDORA_CONNECTION_RETRIES = None
If username and password are not specified, the Repository instance
will be initialized without credentials and access Fedora as an
anonymous user. If pidspace is not specified, the Repository will use
the default pidspace for the configured Fedora instance.
Projects that need unit test setup and clean-up tasks (syncrepo and
test object removal) to access Fedora with different credentials than
the configured Fedora credentials should use the following settings::
FEDORA_TEST_USER = 'testuser'
FEDORA_TEST_PASSWORD = 'testpassword'
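
If you are not using Django at all, you can skip the settings machinery and
pass connection parameters directly when constructing
:class:`~eulfedora.server.Repository` (a minimal sketch; the url and
credentials below are placeholders)::

    from eulfedora.server import Repository
    repo = Repository('http://fedora.host.name:8080/fedora/',
                      username='user', password='password')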
----
"""
from __future__ import unicode_literals
import logging
import requests
import warnings
import six
from eulfedora.rdfns import model as modelns
from eulfedora.api import ApiFacade, ResourceIndex
from eulfedora.models import DigitalObject
from eulfedora.util import parse_xml_object
from eulfedora.xml import SearchResults, NewPids
logger = logging.getLogger(__name__)
# a repository object, basically a handy facade for easy api access
class Repository(object):
"""Pythonic interface to a single Fedora Commons repository instance.
Connect to a single Fedora Repository by passing in connection
parameters or based on configuration in a Django settings file.
If username and password are specified, they will override any
    fedora credentials configured in Django settings.
If a request object is passed in and the user is logged in, this
class will look for credentials in the session, as set by
:meth:`~eulcore.django.fedora.views.login_and_store_credentials_in_session`
(see method documentation for more details and potential security
risks).
Order of precedence for credentials:
* If a request object is passed in and user credentials are
available in the session, that will be used first.
* Explicit username and password parameters will be used next.
* If none of these options are available, fedora credentials
will be set in django settings will be used.
If a *retries* value is specified, this will override the default
set in :attr:`Repository.retries` which is used to configure the
maximum number of requests retries for connection errors (see
http://docs.python-requests.org/en/master/api/#requests.adapters.HTTPAdapter).
Retries can also be specified via Django settings as
    **FEDORA_CONNECTION_RETRIES**; if an initialization parameter is specified,
that will override the Django setting.
"""
default_object_type = DigitalObject
"Default type to use for methods that return fedora objects - :class:`DigitalObject`"
default_pidspace = None
#: default number of retries to request for API connections; see
#: http://docs.python-requests.org/en/master/api/#requests.adapters.HTTPAdapter
retries = 3
default_retry_option = object()
# default retry option, so None can be recognized as an option
search_fields = [
'pid', 'label', 'state', 'ownerId', 'cDate', 'mDate',
'dcmDate', 'title', 'creator', 'subject', 'description', 'publisher',
'contributor', 'date', 'type', 'format', 'identifier', 'source',
'language', 'relation', 'coverage', 'rights']
"fields that can be searched against in :meth:`find_objects`"
search_fields_aliases = {
'owner': 'ownerId',
'created': 'cDate',
'modified': 'mDate',
'dc_modified': 'dcmDate'
}
"human-readable aliases for oddly-named fedora search fields"
def __init__(self, root=None, username=None, password=None, request=None,
retries=default_retry_option):
# when initialized via django, settings should be pulled from django conf
if root is None:
try:
from django.conf import settings
from eulfedora import cryptutil
root = getattr(settings, 'FEDORA_ROOT', None)
if root is None:
raise Exception('Cannot initialize a Fedora connection without specifying ' +
'Fedora root url directly or in Django settings as FEDORA_ROOT')
# if username and password are not set, attempt to pull from django conf
if username is None and password is None:
if request is not None and request.user.is_authenticated() and \
FEDORA_PASSWORD_SESSION_KEY in request.session:
username = request.user.username
password = cryptutil.decrypt(request.session[FEDORA_PASSWORD_SESSION_KEY])
if username is None and hasattr(settings, 'FEDORA_USER'):
username = settings.FEDORA_USER
if password is None and hasattr(settings, 'FEDORA_PASSWORD'):
password = settings.FEDORA_PASSWORD
if hasattr(settings, 'FEDORA_PIDSPACE'):
self.default_pidspace = settings.FEDORA_PIDSPACE
                # if retries is specified in django settings, use it
if hasattr(settings, 'FEDORA_CONNECTION_RETRIES'):
self.retries = settings.FEDORA_CONNECTION_RETRIES
except ImportError:
pass
# if retries is specified in init options, that should override
# default value or django setting
if retries is not self.default_retry_option:
self.retries = retries
if root is None:
raise Exception('Could not determine Fedora root url from django settings or parameter')
logger.debug("Connecting to fedora at %s %s", root,
'as %s' % username if username
else '(no user credentials)')
self.api = ApiFacade(root, username, password)
self.fedora_root = self.api.base_url
self.username = username
self.password = password
self._risearch = None
@property
def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch
def get_next_pid(self, namespace=None, count=None):
"""
Request next available pid or pids from Fedora, optionally in a specified
namespace. Calls :meth:`ApiFacade.getNextPID`.
.. deprecated :: 0.14
Mint pids for new objects with
:func:`eulfedora.models.DigitalObject.get_default_pid`
instead, or call :meth:`ApiFacade.getNextPID` directly.
:param namespace: (optional) get the next pid in the specified pid namespace;
otherwise, Fedora will return the next pid in the configured default namespace.
:param count: (optional) get the specified number of pids; by default, returns 1 pid
:rtype: string or list of strings
"""
# this method should no longer be needed - default pid logic moved to DigitalObject
warnings.warn("""get_next_pid() method is deprecated; you should mint new pids via DigitalObject or ApiFacade.getNextPID() instead.""",
DeprecationWarning)
kwargs = {}
if namespace:
kwargs['namespace'] = namespace
elif self.default_pidspace:
kwargs['namespace'] = self.default_pidspace
if count:
kwargs['numPIDs'] = count
r = self.api.getNextPID(**kwargs)
nextpids = parse_xml_object(NewPids, r.content, r.url)
if count is None:
return nextpids.pids[0]
else:
return nextpids.pids
def ingest(self, text, log_message=None):
"""
Ingest a new object into Fedora. Returns the pid of the new object on
success. Calls :meth:`ApiFacade.ingest`.
:param text: full text content of the object to be ingested
:param log_message: optional log message
:rtype: string
"""
kwargs = {'text': text}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.ingest(**kwargs)
return response.content
def purge_object(self, pid, log_message=None):
"""
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
"""
kwargs = {'pid': pid}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok
def get_objects_with_cmodel(self, cmodel_uri, type=None):
"""
Find objects in Fedora with the specified content model.
:param cmodel_uri: content model URI (should be full URI in info:fedora/pid:### format)
:param type: type of object to return (e.g., class:`DigitalObject`)
:rtype: list of objects
"""
uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)
return [self.get_object(uri, type) for uri in uris]
def get_object(self, pid=None, type=None, create=None):
"""
Initialize a single object from Fedora, or create a new one, with the
same Fedora configuration and credentials.
:param pid: pid of the object to request, or a function that can be
called to get one. if not specified, :meth:`get_next_pid`
will be called if a pid is needed
:param type: type of object to return; defaults to :class:`DigitalObject`
:rtype: single object of the type specified
:create: boolean: create a new object? (if not specified, defaults
to False when pid is specified, and True when it is not)
"""
objtype = type or self.default_object_type
if pid is None:
if create is None:
create = True
else:
if create is None:
create = False
return objtype(self.api, pid, create,
default_pidspace=self.default_pidspace)
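    # Illustrative sketch (added; ``repo`` and ``MyObject`` are assumed names):
    #
    #   obj = repo.get_object('demo:1', type=MyObject)  # wrap an existing pid
    #   new_obj = repo.get_object(type=MyObject)        # mint a pid, create=True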
def infer_object_subtype(self, api, pid=None, create=False, default_pidspace=None):
"""Construct a DigitalObject or appropriate subclass, inferring the
appropriate subtype using :meth:`best_subtype_for_object`. Note that
this method signature has been selected to match the
:class:`~eulfedora.models.DigitalObject` constructor so that this
method might be passed directly to :meth:`get_object` as a `type`::
>>> obj = repo.get_object(pid, type=repo.infer_object_subtype)
See also: :class:`TypeInferringRepository`
"""
obj = DigitalObject(api, pid, create, default_pidspace)
if create:
return obj
if not obj.exists:
return obj
match_type = self.best_subtype_for_object(obj)
return match_type(api, pid)
def best_subtype_for_object(self, obj, content_models=None):
"""Given a :class:`~eulfedora.models.DigitalObject`, examine the
object to select the most appropriate subclass to instantiate. This
generic implementation examines the object's content models and
compares them against the defined subclasses of
:class:`~eulfedora.models.DigitalObject` to pick the best match.
Projects that have a more nuanced understanding of their particular
objects should override this method in a :class:`Repository`
subclass. This method is intended primarily for use by
:meth:`infer_object_subtype`.
:param obj: a :class:`~eulfedora.models.DigitalObject` to inspect
:param content_models: optional list of content models, if they are known
ahead of time (e.g., from a Solr search result), to avoid
an additional Fedora look-up
:rtype: a subclass of :class:`~eulfedora.models.DigitalObject`
"""
if content_models is None:
obj_models = set(str(m) for m in obj.get_models())
else:
obj_models = content_models
# go through registered DigitalObject subtypes looking for what type
# this object might be. use the first longest match: that is, look
# for classes we qualify for by having all of their cmodels, and use
# the class with the longest set of cmodels. if there's a tie, warn
# and pick one.
# TODO: store these at registration in a way that doesn't require
# this manual search every time
# TODO: eventually we want to handle the case where a DigitalObject
# can use multiple unrelated cmodels, though we need some major
# changes beyond here to support that
match_len, matches = 0, []
for obj_type in DigitalObject.defined_types.values():
type_model_list = getattr(obj_type, 'CONTENT_MODELS', None)
if not type_model_list:
continue
type_models = set(type_model_list)
if type_models.issubset(obj_models):
if len(type_models) > match_len:
match_len, matches = len(type_models), [obj_type]
elif len(type_models) == match_len:
matches.append(obj_type)
if not matches:
return DigitalObject
if len(matches) > 1:
            # Check whether one match is a subclass of all the others
            # (i.e. the most derived match in the list).
for obj_type in matches:
is_root_subclass = True
for possible_parent_type in matches:
if not issubclass(obj_type, possible_parent_type):
is_root_subclass = False
if is_root_subclass:
return obj_type
logger.warn('%s has %d potential classes with no root subclass for the list. using the first: %s',
obj, len(matches), repr(matches))
return matches[0]
def find_objects(self, terms=None, type=None, chunksize=None, **kwargs):
"""
Find objects in Fedora. Find query should be generated via keyword
args, based on the fields in Fedora documentation. By default, the
query uses a contains (~) search for all search terms. Calls
:meth:`ApiFacade.findObjects`. Results seem to return consistently
in ascending PID order.
Example usage - search for all objects where the owner contains 'jdoe'::
repository.find_objects(ownerId='jdoe')
Supports all search operators provided by Fedora findObjects query (exact,
gt, gte, lt, lte, and contains). To specify the type of query for
a particular search term, call find_objects like this::
repository.find_objects(ownerId__exact='lskywalker')
repository.find_objects(date__gt='20010302')
:param type: type of objects to return; defaults to :class:`DigitalObject`
:param chunksize: number of objects to return at a time
:rtype: generator for list of objects
"""
type = type or self.default_object_type
find_opts = {'chunksize' : chunksize}
search_operators = {
'exact': '=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
'contains': '~'
}
if terms is not None:
find_opts['terms'] = terms
else:
conditions = []
for field, value in six.iteritems(kwargs):
if '__' in field:
field, filtr = field.split('__')
if filtr not in search_operators:
raise Exception("Unsupported search filter '%s'" % filtr)
op = search_operators[filtr]
else:
op = search_operators['contains'] # default search mode
if field in self.search_fields_aliases:
field = self.search_fields_aliases[field]
if field not in self.search_fields:
raise Exception("Error generating Fedora findObjects query: unknown search field '%s'" \
% field)
if ' ' in value:
# if value contains whitespace, it must be delimited with single quotes
value = "'%s'" % value
conditions.append("%s%s%s" % (field, op, value))
query = ' '.join(conditions)
find_opts['query'] = query
r = self.api.findObjects(**find_opts)
chunk = parse_xml_object(SearchResults, r.content, r.url)
while True:
for result in chunk.results:
yield type(self.api, result.pid)
if chunk.session_token:
r = self.api.findObjects(session_token=chunk.session_token, **find_opts)
chunk = parse_xml_object(SearchResults, r.content, r.url)
else:
break
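    # Usage note (added): find_objects is a generator, so results stream
    # lazily one API chunk at a time; a hedged example:
    #
    #   for obj in repo.find_objects(ownerId='jdoe', chunksize=50):
    #       print(obj.pid)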
class TypeInferringRepository(Repository):
"""A simple :class:`Repository` subclass whose default object type for
:meth:`~Repository.get_object` is
:meth:`~Repository.infer_object_subtype`. Thus, each call to
:meth:`~Repository.get_object` on a repository such as this will
automatically use :meth:`~Repository.best_subtype_for_object` (or a
subclass override) to infer the object's proper type.
"""
default_object_type = Repository.infer_object_subtype
# session key for storing a user password that will be used for Fedora access
# - used here and in eulcore.django.fedora.views
FEDORA_PASSWORD_SESSION_KEY = 'eulfedora_password'
| apache-2.0 | -3,356,252,382,455,029,000 | 40.80597 | 143 | 0.636711 | false |
turon/openthread | tools/harness-automation/cases/router_5_2_7.py | 1 | 1879 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_5_2_7(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 2 7'
golden_devices_required = 16
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -737,141,814,733,381,500 | 39.847826 | 77 | 0.761575 | false |
Cepave/portal | web/controller/host.py | 1 | 6380 | # -*- coding:utf-8 -*-
__author__ = 'Ulric Qin'
from flask import jsonify, request, render_template, g, make_response
from web import app
from web.model.host_group import HostGroup
from web.model.group_host import GroupHost
from web.model.grp_tpl import GrpTpl
from web.model.host import Host
from web.model.template import Template
from frame import config
from fe_api import post2FeUpdateEventCase
import time
import logging
log = logging.getLogger(__name__)
@app.route('/group/<group_id>/hosts.txt')
def group_hosts_export(group_id):
group_id = int(group_id)
group = HostGroup.read(where='id = %s', params=[group_id])
if not group:
return jsonify(msg='no such group %s' % group_id)
vs, _ = Host.query(1, 10000000, '', '0', group_id)
names = [v.hostname for v in vs]
response = make_response('\n'.join(names))
response.headers["content-type"] = "text/plain"
return response
@app.route('/group/<group_id>/hosts')
def group_hosts_list(group_id):
g.xbox = request.args.get('xbox', '')
group_id = int(group_id)
group = HostGroup.read(where='id = %s', params=[group_id])
if not group:
return jsonify(msg='no such group %s' % group_id)
page = int(request.args.get('p', 1))
limit = int(request.args.get('limit', 10))
query = request.args.get('q', '')
maintaining = request.args.get('maintaining', '0')
vs, total = Host.query(page, limit, query, maintaining, group_id)
return render_template(
'host/index.html',
data={
'vs': vs,
'total': total,
'query': query,
'limit': limit,
'page': page,
'maintaining': maintaining,
'group': group,
},
config=config
)
@app.route('/host/remove', methods=['POST'])
def host_remove_post():
group_id = int(request.form['grp_id'].strip())
host_ids = request.form['host_ids'].strip()
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointunbind"
GroupHost.unbind(group_id, host_ids)
for host_id in host_ids.split(","):
data = {'hostgroupId': group_id, 'hostId': host_id}
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
            return jsonify(msg='failed to delete host, please try again!')
return jsonify(msg='')
@app.route('/host/maintain', methods=['POST'])
def host_maintain_post():
begin = int(request.form['begin'].strip())
end = int(request.form['end'].strip())
host_ids = request.form['host_ids'].strip()
alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointonmaintain"
if begin <= 0 or end <= 0:
return jsonify(msg='begin or end is invalid')
for host_id in host_ids.split(","):
data = {'hostId': host_id, 'maintainBegin': begin, 'maintainEnd': end}
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg=Host.maintain(begin, end, host_ids))
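# Hedged usage note (added): begin/end are epoch seconds posted as form
# fields, e.g.
#
#   curl -d 'host_ids=1,2&begin=1500000000&end=1500003600' \
#        http://<portal>/host/maintain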
# clear the maintain window
@app.route('/host/reset', methods=['POST'])
def host_reset_post():
host_ids = request.form['host_ids'].strip()
return jsonify(msg=Host.no_maintain(host_ids))
@app.route('/host/add')
def host_add_get():
group_id = request.args.get('group_id', '')
if not group_id:
return jsonify(msg='no group_id given')
group_id = int(group_id)
group = HostGroup.read('id = %s', [group_id])
if not group:
return jsonify(msg='no such group')
return render_template('host/add.html', group=group, config=config)
@app.route('/host/add', methods=['POST'])
def host_add_post():
group_id = request.form['group_id']
if not group_id:
return jsonify(msg='no group_id given')
group_id = int(group_id)
group = HostGroup.read('id = %s', [group_id])
if not group:
return jsonify(msg='no such group')
hosts = request.form['hosts'].strip()
if not hosts:
return jsonify(msg='hosts is blank')
host_arr = hosts.splitlines()
safe_host_arr = [h for h in host_arr if h]
if not safe_host_arr:
return jsonify(msg='hosts is blank')
success = []
failure = []
for h in safe_host_arr:
msg = GroupHost.bind(group_id, h)
if not msg:
success.append('%s<br>' % h)
else:
failure.append('%s %s<br>' % (h, msg))
data = '<div class="alert alert-danger" role="alert">failure:<hr>' + ''.join(
failure) + '</div><div class="alert alert-success" role="alert">success:<hr>' + ''.join(success) + '</div>'
return jsonify(msg='', data=data)
# show the host groups a given host is bound to
@app.route('/host/<host_id>/groups')
def host_groups_get(host_id):
host_id = int(host_id)
h = Host.read('id = %s', params=[host_id])
if not h:
return jsonify(msg='no such host')
group_ids = GroupHost.group_ids(h.id)
groups = [HostGroup.read('id = %s', [group_id]) for group_id in group_ids]
return render_template('host/groups.html', groups=groups, host=h, config=config)
@app.route('/host/<host_id>/templates')
def host_templates_get(host_id):
host_id = int(host_id)
h = Host.read('id = %s', params=[host_id])
if not h:
return jsonify(msg='no such host')
group_ids = GroupHost.group_ids(h.id)
templates = GrpTpl.tpl_set(group_ids)
for v in templates:
v.parent = Template.get(v.parent_id)
return render_template('host/templates.html', config=config, **locals())
@app.route('/host/unbind')
def host_unbind_get():
    host_id = request.args.get('host_id', '').strip()
    if not host_id:
        return jsonify(msg='host_id is blank')
    group_id = request.args.get('group_id', '').strip()
    if not group_id:
        return jsonify(msg='group_id is blank')
    alarmAdUrl = config.JSONCFG['shortcut']['falconUIC'] + "/api/v1/alarmadjust/whenendpointunbind"
    # build the payload only after both ids have been validated
    data = {'hostgroupId': group_id, 'hostId': host_id}
    GroupHost.unbind(int(group_id), host_id)
respCode = post2FeUpdateEventCase(alarmAdUrl, data)
if respCode != 200:
log.error(alarmAdUrl + " got " + str(respCode) + " with " + str(data))
return jsonify(msg='')
| apache-2.0 | -4,357,326,261,906,748,400 | 31.943005 | 115 | 0.619534 | false |
InitialState/python_appender | ISStreamer/Streamer.py | 1 | 10347 | # local config helper stuff
try:
import ISStreamer.configutil as configutil
except ImportError:
import configutil
try:
import ISStreamer.version as version
except ImportError:
import version
import uuid
# python 2 and 3 conversion support
import sys
if (sys.version_info < (2,7,0)):
sys.stderr.write("You need at least python 2.7.0 to use the ISStreamer")
exit(1)
elif (sys.version_info >= (3,0)):
import http.client as httplib
else:
import httplib
import json
# time stuff
import datetime
import time
# performance stuff
import threading
import collections
import csv
class Streamer:
BucketName = ""
AccessKey = ""
Channel = ""
BufferSize = 10
StreamApiBase = ""
LogQueue = None
DebugLevel = 0
BucketKey = ""
IsClosed = True
Offline = False
Async = True
LocalFile = None
ApiVersion = '<=0.0.4'
MissedEvents = None
def __init__(self, bucket_name="", bucket_key="", access_key="", ini_file_location=None, debug_level=0, buffer_size=10, offline=None, use_async=True):
config = configutil.getConfig(ini_file_location)
if (offline != None):
self.Offline = offline
else:
if (config["offline_mode"] == "false"):
self.Offline = False
else:
self.Offline = True
self.Async = use_async
if (self.Offline):
try:
file_location = "{}.csv".format(config["offline_file"])
self.LocalFileHandler = open(file_location, 'w')
self.LocalFile = csv.writer(self.LocalFileHandler)
except:
print("There was an issue opening the file (nees more description)")
if (config == None and bucket_name=="" and access_key == ""):
raise Exception("config not found and arguments empty")
if (bucket_name == ""):
bucket_name = config["bucket"]
else:
bucket_name = bucket_name
if (access_key == ""):
self.AccessKey = config["access_key"]
else:
self.AccessKey = access_key
#self.LogQueue = Queue.Queue(self.BufferSize)
self.BucketKey = bucket_key
self.BufferSize = buffer_size
self.LogQueue = collections.deque()
self.StreamApiBase = config["stream_api_base"]
self.set_bucket(bucket_name, bucket_key)
self.DebugLevel = debug_level
self.IsClosed = False
self.console_message("access_key: {accessKey}".format(accessKey=self.AccessKey))
self.console_message("stream_api_base: {api}".format(api=self.StreamApiBase))
def ship_to_api(self, resource, contents):
api_base = self.StreamApiBase
headers = {
'Content-Type': 'application/json',
'User-Agent': 'PyStreamer v' + version.__version__,
'Accept-Version': self.ApiVersion,
'X-IS-AccessKey': self.AccessKey,
'X-IS-BucketKey': self.BucketKey
}
def __ship(retry_attempts, wait=0):
conn = None
response = None
if (self.StreamApiBase.startswith('https://')):
api_base = self.StreamApiBase[8:]
self.console_message("ship {resource}: stream api base domain: {domain}".format(domain=api_base, resource=resource), level=2)
conn = httplib.HTTPSConnection(api_base, timeout=120)
else:
api_base = self.StreamApiBase[7:]
self.console_message("ship {resource}: stream api base domain: {domain}".format(domain=api_base, resource=resource), level=2)
conn = httplib.HTTPConnection(api_base, timeout=120)
retry_attempts = retry_attempts - 1
if (retry_attempts < 0):
if (self.DebugLevel >= 2):
raise Exception("shipping failed.. network issue?")
else:
self.console_message("ship: ISStreamer failed to ship after a number of attempts.", level=0)
if (self.MissedEvents == None):
self.MissedEvents = open("err_missed_events.txt", 'w+')
if (self.MissedEvents != None):
self.MissedEvents.write("{}\n".format(json.dumps(contents)))
return
try:
if (wait > 0):
self.console_message("ship-debug: pausing thread for {wait} seconds".format(wait=wait))
time.sleep(wait)
conn.request('POST', resource, json.dumps(contents), headers)
response = conn.getresponse()
response_body = response.read()
if (response.status >= 200 and response.status < 300):
self.console_message("ship: status: " + str(response.status) + "\nheaders: " + str(response.msg), level=2)
self.console_message("ship: body: " + str(response_body), level=3)
elif (response.status == 400):
json_err = None
try:
json_err = json.loads(response_body)
except Exception as ex:
pass
if json_err != None:
if (json_err["message"]["error"]["type"] == "BUCKET_REMOVED"):
self.console_message("Bucket Creation Failed: " + json_err["message"]["error"]["message"])
elif (response.status == 401 or response.status == 403):
self.console_message("ERROR: unauthorized access_key: " + self.AccessKey)
elif (response.status == 402):
self.console_message("AccessKey exceeded limit for month, check account")
raise Exception("PAYMENT_REQUIRED")
elif (response.status == 429):
if "Retry-After" in response.msg:
retry_after = response.msg["Retry-After"]
self.console_message("Request limit exceeded, wait {limit} seconds before trying again".format(limit=retry_after))
__ship(retry_attempts, int(retry_after)+1)
else:
self.console_message("Request limit exceeded")
else:
self.console_message("ship: failed on attempt {atmpt} (StatusCode: {sc}; Reason: {r})".format(sc=response.status, r=response.reason, atmpt=retry_attempts))
raise Exception("ship exception")
except Exception as ex:
if (len(ex.args) > 0 and ex.args[0] == "PAYMENT_REQUIRED"):
raise Exception("Either account is capped or an upgrade is required.")
self.console_message("ship: exception shipping on attempt {atmpt}.".format(atmpt=retry_attempts))
if (self.DebugLevel > 1):
raise ex
else:
self.console_message("exception gobbled: {}".format(str(ex)))
__ship(retry_attempts, 1)
__ship(3)
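        # Added commentary: __ship retries recursively (retry_attempts - 1 per
        # attempt) and honors HTTP 429 Retry-After headers, so a single
        # ship_to_api call may sleep and block for several seconds when the
        # API is rate limiting.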
def set_bucket(self, bucket_name="", bucket_key="", retries=3):
def __create_bucket(new_bucket_name, new_bucket_key, access_key):
self.ship_to_api("/api/buckets", {'bucketKey': new_bucket_key, 'bucketName': new_bucket_name})
if (bucket_key == None or bucket_key == ""):
bucket_key = str(uuid.uuid4())
self.BucketKey = bucket_key
self.BucketName = bucket_name
if (not self.Offline):
if (self.Async):
t = threading.Thread(target=__create_bucket, args=(bucket_name, bucket_key, self.AccessKey))
t.daemon = False
t.start()
else:
__create_bucket(bucket_name, bucket_key, self.AccessKey)
else:
self.console_message("Working in offline mode.", level=0)
def console_message(self, message, level=1):
if (self.DebugLevel >= level):
print(message)
def ship_messages(self, messages, retries=3):
self.ship_to_api("/api/events", messages)
def flush(self):
if (self.Offline):
self.console_message("flush: no need, in offline mode", level=2)
return
messages = []
self.console_message("flush: checking queue", level=2)
isEmpty = False
while not isEmpty:
try:
m = self.LogQueue.popleft()
messages.append(m)
except IndexError:
isEmpty = True
self.console_message("flush: queue empty...", level=2)
if len(messages) > 0:
self.console_message("flush: queue not empty, shipping", level=2)
self.ship_messages(messages)
self.console_message("flush: finished flushing queue", level=2)
def log_object(self, obj, key_prefix=None, epoch=None):
if (epoch == None):
epoch = time.time()
if (key_prefix == None):
key_prefix = "{}_".format(str(type(obj).__name__))
elif (key_prefix != None and key_prefix != ""):
key_prefix = "{}_".format(key_prefix)
else:
key_prefix = ""
if (type(obj).__name__ == 'list'):
i = 0
for val in obj:
key_name = "{}{}".format(key_prefix, i)
self.log(key_name, val, epoch=epoch)
i += 1
elif (type(obj).__name__ == 'dict'):
for key in obj:
key_name = "{}{}".format(key_prefix, key)
self.log(key_name, obj[key], epoch=epoch)
else:
for attr in dir(obj):
if not isinstance(getattr(type(obj), attr, None), property):
continue
key_name = "{}{}".format(key_prefix, attr)
self.log(key_name, getattr(obj, attr), epoch=epoch)
def log(self, key, value, epoch=None):
def __ship_buffer():
i = self.BufferSize
messages = []
while(i > 0):
try:
m = self.LogQueue.popleft()
messages.append(m)
except IndexError:
i = 0
self.console_message("ship_buffer: queue empty")
i = i - 1
self.console_message("ship_buffer: shipping", level=2)
self.ship_messages(messages)
self.console_message("ship_buffer: finished shipping", level=2)
timeStamp = time.time()
gmtime = datetime.datetime.fromtimestamp(timeStamp)
if epoch != None:
try:
gmtime = datetime.datetime.fromtimestamp(epoch)
timeStamp = epoch
except:
self.console_message("epoch was overriden with invalid time, using current timstamp instead")
formatted_gmTime = gmtime.strftime('%Y-%m-%d %H:%M:%S.%f')
self.console_message("{time}: {key} {value}".format(key=key, value=value, time=formatted_gmTime))
if (not self.Offline):
if (len(self.LogQueue) >= self.BufferSize):
self.console_message("log: queue size approximately at or greater than buffer size, shipping!", level=10)
self.console_message("log: async is {}".format(self.Async))
if (self.Async):
self.console_message("log: spawning ship thread", level=3)
t = threading.Thread(target=__ship_buffer)
t.daemon = False
t.start()
else:
__ship_buffer()
self.console_message("log: queueing log item", level=2)
log_item = {
"key": key,
"value": value,
"epoch": timeStamp
}
self.LogQueue.append(log_item)
else:
self.LocalFile.writerow([timeStamp, key, value])
def close(self):
self.IsClosed = True
self.flush()
        if (self.MissedEvents is not None):
self.MissedEvents.close()
if (self.Offline):
self.console_message("closing local file handler", level=2)
self.LocalFileHandler.close()
def __del__(self):
"""Try to close/flush the cache before destruction"""
try:
if (not self.IsClosed):
self.close()
        except Exception:
if (self.DebugLevel >= 2):
raise Exception("failed to close the buffer, make sure to explicitly call close() on the Streamer")
else:
self.console_message("failed to close the buffer, make sure to explicitly call close() on the Streamer", level=1)
| mit | -6,042,125,597,051,966,000 | 31.034056 | 160 | 0.667826 | false |
Guts/isogeo-api-py-minsdk | isogeo_pysdk/enums/limitation_types.py | 1 | 1993 | # -*- coding: UTF-8 -*-
#! python3
"""
Isogeo API v1 - Enums for Limitation types entity accepted values.
See: http://help.isogeo.com/api/complete/index.html
"""
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
from enum import auto, Enum
# #############################################################################
# ########## Classes ###############
# ##################################
class LimitationTypes(Enum):
"""Closed list of accepted types for limitations in Isogeo API.
:Example:
>>> # parse members and values
>>> print("{0:<30} {1:>20}".format("Enum", "Value"))
>>> for tag in LimitationTypes:
>>> print("{0:<30} {1:>20}".format(tag, tag.value))
Enum Value
LimitationTypes.legal 1
LimitationTypes.security 2
>>> # check if a var is an accepted value
>>> print("legal" in LimitationTypes.__members__)
True
>>> print("Legal" in LimitationTypes.__members__) # case sensitive
False
>>> print("security" in LimitationTypes.__members__)
True
See: https://docs.python.org/3/library/enum.html
"""
legal = auto()
security = auto()
# ##############################################################################
# ##### Stand alone program ########
# ##################################
if __name__ == "__main__":
""" standalone execution """
print("{0:<30} {1:>30}".format("Enum", "Value"))
for tag in LimitationTypes:
print("{0:<30} {1:>30}".format(tag, tag.value))
print(len(LimitationTypes))
print("legal" in LimitationTypes.__members__)
print("Legal" in LimitationTypes.__members__)
print("coordinateSystem" in LimitationTypes.__members__)
| gpl-3.0 | -1,665,953,534,875,158,300 | 31.145161 | 80 | 0.438033 | false |
15Dkatz/pants | src/python/pants/engine/build_files.py | 1 | 13721 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
from os.path import dirname, join
import six
from pants.base.project_tree import Dir
from pants.base.specs import (AscendantAddresses, DescendantAddresses, SiblingAddresses,
SingleAddress)
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.addressable import (AddressableDescriptor, BuildFileAddresses, Collection,
Exactly, TypeConstraintError)
from pants.engine.fs import FilesContent, PathGlobs, Snapshot
from pants.engine.mapper import AddressFamily, AddressMap, AddressMapper, ResolveError
from pants.engine.objects import Locatable, SerializableFactory, Validatable
from pants.engine.rules import RootRule, SingletonRule, TaskRule, rule
from pants.engine.selectors import Select, SelectDependencies, SelectProjection
from pants.engine.struct import Struct
from pants.util.objects import datatype
_SPECS_CONSTRAINT = Exactly(SingleAddress,
SiblingAddresses,
DescendantAddresses,
AscendantAddresses)
class ResolvedTypeMismatchError(ResolveError):
"""Indicates a resolved object was not of the expected type."""
def _key_func(entry):
key, value = entry
return key
class BuildDirs(datatype('BuildDirs', ['dependencies'])):
"""A list of Stat objects for directories containing build files."""
class BuildFiles(datatype('BuildFiles', ['files_content'])):
"""The FileContents of BUILD files in some directory"""
class BuildFileGlobs(datatype('BuildFilesGlobs', ['path_globs'])):
"""A wrapper around PathGlobs that are known to match a build file pattern."""
@rule(BuildFiles,
[SelectProjection(FilesContent, PathGlobs, 'path_globs', BuildFileGlobs)])
def build_files(files_content):
return BuildFiles(files_content)
@rule(BuildFileGlobs, [Select(AddressMapper), Select(Dir)])
def buildfile_path_globs_for_dir(address_mapper, directory):
patterns = address_mapper.build_patterns
return BuildFileGlobs(PathGlobs.create(directory.path, include=patterns, exclude=()))
@rule(AddressFamily, [Select(AddressMapper), Select(Dir), Select(BuildFiles)])
def parse_address_family(address_mapper, path, build_files):
"""Given the contents of the build files in one directory, return an AddressFamily.
The AddressFamily may be empty, but it will not be None.
"""
files_content = build_files.files_content.dependencies
if not files_content:
raise ResolveError('Directory "{}" does not contain build files.'.format(path))
address_maps = []
paths = (f.path for f in files_content)
ignored_paths = set(address_mapper.build_ignore_patterns.match_files(paths))
for filecontent_product in files_content:
if filecontent_product.path in ignored_paths:
continue
address_maps.append(AddressMap.parse(filecontent_product.path,
filecontent_product.content,
address_mapper.parser))
return AddressFamily.create(path.path, address_maps)
class UnhydratedStruct(datatype('UnhydratedStruct', ['address', 'struct', 'dependencies'])):
"""A product type that holds a Struct which has not yet been hydrated.
A Struct counts as "hydrated" when all of its members (which are not themselves dependencies
lists) have been resolved from the graph. This means that hydrating a struct is eager in terms
of inline addressable fields, but lazy in terms of the complete graph walk represented by
the `dependencies` field of StructWithDeps.
"""
def __eq__(self, other):
if type(self) != type(other):
return NotImplemented
return self.struct == other.struct
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.struct)
def _raise_did_you_mean(address_family, name):
possibilities = '\n '.join(':{}'.format(a.target_name) for a in address_family.addressables)
raise ResolveError('"{}" was not found in namespace "{}". '
'Did you mean one of:\n {}'
.format(name, address_family.namespace, possibilities))
@rule(UnhydratedStruct,
[Select(AddressMapper),
SelectProjection(AddressFamily, Dir, 'spec_path', Address),
Select(Address)])
def resolve_unhydrated_struct(address_mapper, address_family, address):
"""Given an Address and its AddressFamily, resolve an UnhydratedStruct.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those are requested explicitly by tasks using SelectDependencies.
"""
struct = address_family.addressables.get(address)
addresses = address_family.addressables
if not struct or address not in addresses:
_raise_did_you_mean(address_family, address.target_name)
dependencies = []
def maybe_append(outer_key, value):
if isinstance(value, six.string_types):
if outer_key != 'dependencies':
dependencies.append(Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots))
elif isinstance(value, Struct):
collect_dependencies(value)
def collect_dependencies(item):
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
continue
if isinstance(value, collections.MutableMapping):
for _, v in sorted(value.items(), key=_key_func):
maybe_append(key, v)
elif isinstance(value, collections.MutableSequence):
for v in value:
maybe_append(key, v)
else:
maybe_append(key, value)
collect_dependencies(struct)
  return UnhydratedStruct(
    [build_address for build_address in addresses if build_address == address][0],
    struct, dependencies)
def hydrate_struct(address_mapper, unhydrated_struct, dependencies):
"""Hydrates a Struct from an UnhydratedStruct and its satisfied embedded addressable deps.
Note that this relies on the guarantee that DependenciesNode provides dependencies in the
order they were requested.
"""
address = unhydrated_struct.address
struct = unhydrated_struct.struct
def maybe_consume(outer_key, value):
if isinstance(value, six.string_types):
if outer_key == 'dependencies':
# Don't recurse into the dependencies field of a Struct, since those will be explicitly
# requested by tasks. But do ensure that their addresses are absolute, since we're
# about to lose the context in which they were declared.
value = Address.parse(value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots)
else:
value = dependencies[maybe_consume.idx]
maybe_consume.idx += 1
elif isinstance(value, Struct):
value = consume_dependencies(value)
return value
# NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
maybe_consume.idx = 0
# 'zip' the previously-requested dependencies back together as struct fields.
def consume_dependencies(item, args=None):
hydrated_args = args or {}
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
hydrated_args[key] = value
continue
if isinstance(value, collections.MutableMapping):
container_type = type(value)
hydrated_args[key] = container_type((k, maybe_consume(key, v))
for k, v in sorted(value.items(), key=_key_func))
elif isinstance(value, collections.MutableSequence):
container_type = type(value)
hydrated_args[key] = container_type(maybe_consume(key, v) for v in value)
else:
hydrated_args[key] = maybe_consume(key, value)
return _hydrate(type(item), address.spec_path, **hydrated_args)
return consume_dependencies(struct, args={'address': address})
def _hydrate(item_type, spec_path, **kwargs):
# If the item will be Locatable, inject the spec_path.
if issubclass(item_type, Locatable):
kwargs['spec_path'] = spec_path
try:
item = item_type(**kwargs)
except TypeConstraintError as e:
raise ResolvedTypeMismatchError(e)
# Let factories replace the hydrated object.
if isinstance(item, SerializableFactory):
item = item.create()
# Finally make sure objects that can self-validate get a chance to do so.
if isinstance(item, Validatable):
item.validate()
return item
@rule(BuildFileAddresses,
[Select(AddressMapper),
SelectDependencies(AddressFamily, BuildDirs, field_types=(Dir,)),
Select(_SPECS_CONSTRAINT)])
def addresses_from_address_families(address_mapper, address_families, spec):
"""Given a list of AddressFamilies and a Spec, return matching Addresses.
Raises a ResolveError if:
- there were no matching AddressFamilies, or
- the Spec matches no addresses for SingleAddresses.
"""
if not address_families:
raise ResolveError('Path "{}" contains no BUILD files.'.format(spec.directory))
def exclude_address(address):
if address_mapper.exclude_patterns:
address_str = address.spec
return any(p.search(address_str) is not None for p in address_mapper.exclude_patterns)
return False
if type(spec) in (DescendantAddresses, SiblingAddresses, AscendantAddresses):
addresses = tuple(a
for af in address_families
for a in af.addressables.keys()
if not exclude_address(a))
elif type(spec) is SingleAddress:
# TODO Could assert len(address_families) == 1, as it should always be true in this case.
addresses = tuple(a
for af in address_families
for a in af.addressables.keys()
if a.target_name == spec.name and not exclude_address(a))
if not addresses:
if len(address_families) == 1:
_raise_did_you_mean(address_families[0], spec.name)
else:
raise ValueError('Unrecognized Spec type: {}'.format(spec))
return BuildFileAddresses(addresses)
@rule(BuildDirs, [Select(AddressMapper), Select(Snapshot)])
def filter_build_dirs(address_mapper, snapshot):
"""Given a Snapshot matching a build pattern, return parent directories as BuildDirs."""
dirnames = set(dirname(f.stat.path) for f in snapshot.files)
ignored_dirnames = address_mapper.build_ignore_patterns.match_files('{}/'.format(dirname) for dirname in dirnames)
ignored_dirnames = set(d.rstrip('/') for d in ignored_dirnames)
return BuildDirs(tuple(Dir(d) for d in dirnames if d not in ignored_dirnames))
@rule(PathGlobs, [Select(AddressMapper), Select(_SPECS_CONSTRAINT)])
def spec_to_globs(address_mapper, spec):
"""Given a Spec object, return a PathGlobs object for the build files that it matches."""
if type(spec) is DescendantAddresses:
directory = spec.directory
patterns = [join('**', pattern) for pattern in address_mapper.build_patterns]
elif type(spec) in (SiblingAddresses, SingleAddress):
directory = spec.directory
patterns = address_mapper.build_patterns
elif type(spec) is AscendantAddresses:
directory = ''
patterns = [
join(f, pattern)
for pattern in address_mapper.build_patterns
for f in _recursive_dirname(spec.directory)
]
else:
raise ValueError('Unrecognized Spec type: {}'.format(spec))
return PathGlobs.create(directory, include=patterns, exclude=[])
def _recursive_dirname(f):
"""Given a relative path like 'a/b/c/d', yield all ascending path components like:
'a/b/c/d'
'a/b/c'
'a/b'
'a'
''
"""
while f:
yield f
f = dirname(f)
yield ''
BuildFilesCollection = Collection.of(BuildFiles)
def create_graph_rules(address_mapper, symbol_table):
"""Creates tasks used to parse Structs from BUILD files.
  :param address_mapper: The AddressMapper instance used to parse BUILD files.
:param symbol_table: A SymbolTable instance to provide symbols for Address lookups.
"""
symbol_table_constraint = symbol_table.constraint()
return [
TaskRule(BuildFilesCollection,
[SelectDependencies(BuildFiles, BuildDirs, field_types=(Dir,))],
BuildFilesCollection),
# A singleton to provide the AddressMapper.
SingletonRule(AddressMapper, address_mapper),
# Support for resolving Structs from Addresses.
TaskRule(
symbol_table_constraint,
[Select(AddressMapper),
Select(UnhydratedStruct),
SelectDependencies(symbol_table_constraint, UnhydratedStruct, field_types=(Address,))],
hydrate_struct
),
resolve_unhydrated_struct,
# BUILD file parsing.
parse_address_family,
build_files,
buildfile_path_globs_for_dir,
# Spec handling: locate directories that contain build files, and request
# AddressFamilies for each of them.
addresses_from_address_families,
filter_build_dirs,
spec_to_globs,
# Root rules representing parameters that might be provided via root subjects.
RootRule(Address),
RootRule(BuildFileAddress),
RootRule(AscendantAddresses),
RootRule(DescendantAddresses),
RootRule(SiblingAddresses),
RootRule(SingleAddress),
]
| apache-2.0 | 2,093,579,768,712,662,000 | 37.434174 | 116 | 0.693535 | false |
julianofischer/caederm | caed/models.py | 1 | 2456 | # encoding: utf-8
# Author: Juliano Fischer Naves
# julianofischer at gmail dot com
# April, 2014
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class StudentClass(models.Model):
name = models.CharField(max_length=30,verbose_name='Turma')
def __unicode__(self):
return self.name
class Meta:
#Translators: The student class
verbose_name = _("Turma")
class Student(models.Model):
cpf = models.CharField(max_length=11,verbose_name="CPF")
name = models.CharField(max_length=60,verbose_name="Nome")
mother_name = models.CharField(max_length=60,verbose_name="Nome da mãe")
father_name = models.CharField(max_length=60,verbose_name="Nome do pai")
father_phone = models.CharField(max_length=60,verbose_name="Telefone do pai")
mother_phone = models.CharField(max_length=11,verbose_name="Telefone da mãe")
home_phone = models.CharField(max_length=11,verbose_name="Telefone de casa")
student_class = models.ForeignKey('StudentClass',verbose_name="Turma")
def __unicode__(self):
return self.name
class Meta:
verbose_name = _("Estudante")
verbose_name_plural = _("Estudantes")
# Incident ("Ocorrência")
class Incident(models.Model):
title = models.CharField(max_length=50,verbose_name=u"Título")
type = models.ForeignKey('IncidentType',verbose_name="Tipo")
description = models.TextField(verbose_name=u"Descrição")
measure_taken = models.TextField(verbose_name="Medida tomada")
student = models.ForeignKey('Student',verbose_name="Estudante")
student_class = models.ForeignKey('StudentClass',verbose_name='Turma')
date_time = models.DateTimeField(verbose_name='Data e hora')
archived = models.BooleanField(default=False,verbose_name='Arquivado')
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if self.pk is None:
self.student_class = self.student.student_class
super(Incident, self).save(*args,**kwargs)
class Meta:
verbose_name = _(u"Ocorrência")
verbose_name_plural = _(u"Ocorrências")
class IncidentType(models.Model):
title = models.CharField(max_length=30,verbose_name=u"Tipo de Ocorrência")
def __unicode__(self):
return self.title
class Meta:
verbose_name=("Tipo")
| gpl-3.0 | 1,138,426,551,920,276,600 | 34.985294 | 81 | 0.667348 | false |
jorisvandenbossche/numpy | numpy/core/shape_base.py | 1 | 28964 | from __future__ import division, absolute_import, print_function
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
See Also
--------
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays along an existing axis.
vsplit : Split array into a list of multiple sub-arrays vertically.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
block : Assemble arrays from blocks.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1, 2, 3],
[2, 3, 4]])
>>> np.stack((a, b), axis=-1)
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
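# e.g. (illustrative): for a shape-(3,) array a, _atleast_nd(a, 3).shape == (1, 1, 3);
# leading ones are prepended via array(a, ndmin=3, ...).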
def _accumulate(values):
# Helper function because Python 2.7 doesn't have
# itertools.accumulate
value = 0
accumulated = []
for v in values:
value += v
accumulated.append(value)
return accumulated
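# e.g. _accumulate([2, 3, 4]) == [2, 5, 9] (running totals, like itertools.accumulate)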
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatation.
Returns
-------
shape: tuple of int
This tuple satisfies:
```
        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
shape == concatenate(arrs, axis).shape
```
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds:
```
ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
```
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
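# e.g. (illustrative): _concatenate_shapes([(2, 3), (2, 4)], axis=1)
# == ((2, 7), [(slice(0, 3),), (slice(3, 7),)])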
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
    max_depth : int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
for subarray in _block_dispatcher(subarrays):
yield subarray
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays together.
stack : Stack arrays in sequence along a new dimension.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 2, 3, 4, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[2, 3, 4]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on an
    # i7-7700HQ processor with dual-channel 2400MHz RAM.
# It didn't seem to matter heavily on the dtype used.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
| bsd-3-clause | 642,279,398,689,061,400 | 30.969095 | 81 | 0.589698 | false |
LabD/django-postcode-lookup | docs/conf.py | 1 | 10306 | # -*- coding: utf-8 -*-
#
# Django PostcodeLookup documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 10 17:06:14 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import pkg_resources
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django PostcodeLookup'
copyright = u'2016, <a href="https://www.mvantellingen.nl/">Michael van Tellingen</a>'
author = u'Michael van Tellingen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = '0.4.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'github_user': 'labd',
'github_banner': True,
'github_repo': 'django-postcode-lookup',
'travis_button': True,
'codecov_button': True,
'analytics_id': '',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Django PostcodeLookup v0.4.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'*': [
'sidebar-intro.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoPostcodeLookupdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoPostcodeLookup.tex', u'Django PostcodeLookup Documentation',
u'Michael van Tellingen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'djangopostcodelookup', u'Django PostcodeLookup Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoPostcodeLookup', u'Django PostcodeLookup Documentation',
author, 'DjangoPostcodeLookup', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| mit | -5,578,861,523,643,388,000 | 28.112994 | 86 | 0.692897 | false |
Parcks/core | test/domain/post_install/shell/test_root_shell_command_runner.py | 1 | 2885 | """
Scriptable Packages Installer - Parcks
Copyright (C) 2017 JValck - Setarit
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Setarit - parcks[at]setarit.com
"""
from __future__ import absolute_import
import unittest
from src.domain.log.logger import Logger
from src.domain.model.post_install.shell_command import ShellCommand
from src.domain.post_install.shell.root_shell_command_runner import RootShellCommandRunner
from src.domain.post_install.shell.shell_command_runnable import ShellCommandRunnable
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class TestRootShellCommandRunner(unittest.TestCase):
def setUp(self):
Logger.disable_all()
self.runner = RootShellCommandRunner(ShellCommand(["pwd"]))
self.runner_multiple = RootShellCommandRunner(ShellCommand(["pwd", "pwd"]))
def tearDown(self):
Logger.enable()
@patch('subprocess.Popen')
@patch.object(ShellCommandRunnable, 'create_executable_command_array')
def test_run_calls_create_executable_command_array_once_if_one_command(self, mock, subprocess_mock):
self.runner.run()
self.assertEqual(1, mock.call_count)
@patch('subprocess.Popen')
@patch.object(ShellCommandRunnable, 'create_executable_command_array')
def test_run_calls_create_executable_command_array_twice_if_two_commands(self, mock, subprocess_mock):
self.runner_multiple.run()
self.assertEqual(2, mock.call_count)
@patch('subprocess.Popen')
@patch.object(ShellCommandRunnable, 'handle_result')
def test_run_calls_handle_result_once_if_one_command(self, mock, subprocess_mock):
self.runner.run()
self.assertEqual(1, mock.call_count)
@patch('subprocess.Popen')
@patch.object(ShellCommandRunnable, 'handle_result')
def test_run_calls_handle_result_twice_if_two_commands(self, mock, subprocess_mock):
self.runner_multiple.run()
self.assertEqual(2, mock.call_count)
def test_create_root_executable_command_array_add_sudo_prefix(self):
command_list = self.runner.create_root_executable_command_array("ls -al")
self.assertTrue("sudo" in command_list)
| gpl-2.0 | -4,424,503,969,292,120,600 | 40.214286 | 108 | 0.727903 | false |
harshays/southwest | southwest/utils.py | 1 | 1537 | import os, sys, argparse
import datetime as dt
import threading
from functools import wraps
def _caffeinate():
os.system('caffeinate')
def caffeinate(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if sys.platform == 'darwin':
thrd = threading.Thread(target = _caffeinate, args = ())
# 'service' thread. does not stop process from terminating.
thrd.daemon = True
thrd.start()
fn(*args, **kwargs)
return wrapper
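# Illustrative usage (hypothetical function): keep macOS awake for the
# duration of a long-running check-in.
#
#   @caffeinate
#   def wait_and_check_in(reservation):
#       ...  # long-running work; 'caffeinate' runs alongside on darwin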
def get_single_args():
parser = argparse.ArgumentParser(description = "CLI for single southwest check-in")
parser.add_argument('firstname', help = "first name")
parser.add_argument('lastname', help = "last name")
parser.add_argument('code', help = "southwest code")
parser.add_argument('-d', '--date', help = "date (format is mm/dd/yyyy, default is today's date)", default = dt.datetime.now())
parser.add_argument('-t', '--time', help = "time (format is hh:mm, default is current time)", default = dt.datetime.now())
args = parser.parse_args()
if isinstance(args.date, dt.datetime):
args.date = args.date.strftime('%m/%d/%Y')
if isinstance(args.time, dt.datetime):
args.time = args.time.strftime('%H:%M')
return args
def get_multiple_args():
parser = argparse.ArgumentParser(description = "CLI for multiple southwest check ins")
parser.add_argument('csv', help = "csv file full path")
args = parser.parse_args()
return args
if __name__ == '__main__':
pass
| mit | -6,317,331,149,440,066,000 | 28.557692 | 131 | 0.635654 | false |
bjodah/sym | sym/tests/test_cse.py | 1 | 1882 | from .. import Backend
import pytest
backends = []
for bk in Backend.backends.keys():
try:
_be = Backend(bk)
except ImportError:
continue
_x = _be.Symbol('x')
try:
_be.cse([_x])
    except Exception:
continue
backends.append(bk)
def _inverse_cse(subs_cses, cse_exprs):
subs = dict(subs_cses)
return [expr.subs(subs) for expr in cse_exprs]
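# e.g. (illustrative): with the single CSE y0 = x**2,
# _inverse_cse([(y0, x**2)], [y0 + 1]) == [x**2 + 1]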
@pytest.mark.parametrize('key', backends)
def test_basic_cse(key):
be = Backend(key)
x, y = map(be.Symbol, "xy")
exprs = [x**2 + y**2 + 3, be.exp(x**2 + y**2)]
subs_cses, cse_exprs = be.cse(exprs)
subs, cses = zip(*subs_cses)
assert cses[0] == x**2 + y**2
for cse_expr in cse_exprs:
assert x not in cse_expr.atoms()
assert y not in cse_expr.atoms()
assert _inverse_cse(subs_cses, cse_exprs) == exprs
@pytest.mark.parametrize('key', backends)
def test_moot_cse(key):
be = Backend(key)
x, y = map(be.Symbol, "xy")
exprs = [x**2 + y**2, y]
subs_cses, cse_exprs = be.cse(exprs)
assert not subs_cses
assert _inverse_cse(subs_cses, cse_exprs) == exprs
@pytest.mark.parametrize('key', backends)
def test_cse_with_symbols(key):
be = Backend(key)
x = be.Symbol('x')
exprs = [x**2, 1/(1 + x**2), be.log(x + 2), be.exp(x + 2)]
subs_cses, cse_exprs = be.cse(exprs, symbols=be.numbered_symbols('y'))
subs, cses = zip(*subs_cses)
assert subs[0] == be.Symbol('y0')
assert subs[1] == be.Symbol('y1')
assert _inverse_cse(subs_cses, cse_exprs) == exprs
@pytest.mark.parametrize('key', backends)
def test_cse_with_symbols_overlap(key):
be = Backend(key)
x0, x1, y = map(be.Symbol, "x0 x1 y".split())
exprs = [x0**2, x0**2 + be.exp(y)**2 + 3, x1 * be.exp(y), be.sin(x1 * be.exp(y) + 1)]
subs_cses, cse_exprs = be.cse(exprs)
assert _inverse_cse(subs_cses, cse_exprs) == exprs
| bsd-2-clause | 5,155,013,612,389,622,000 | 26.676471 | 89 | 0.592455 | false |
samhoo/askbot-realworld | askbot/utils/forms.py | 1 | 8152 | import re
from django import forms
from django.http import str_to_unicode
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from askbot.conf import settings as askbot_settings
from askbot.utils.slug import slugify
from askbot import const
import logging
import urllib
DEFAULT_NEXT = '/' + getattr(settings, 'ASKBOT_URL')
def clean_next(next, default = None):
if next is None or not next.startswith('/'):
if default:
return default
else:
return DEFAULT_NEXT
next = str_to_unicode(urllib.unquote(next), 'utf-8')
next = next.strip()
logging.debug('next url is %s' % next)
return next
def get_next_url(request, default = None):
return clean_next(request.REQUEST.get('next'), default)
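# Example (illustrative): for a login redirect like
#   /account/signin/?next=%2Fquestions%2F42%2F
# get_next_url(request) returns u'/questions/42/'; a missing or non-local
# "next" value falls back to DEFAULT_NEXT.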
class StrippedNonEmptyCharField(forms.CharField):
def clean(self, value):
value = value.strip()
if self.required and value == '':
raise forms.ValidationError(_('this field is required'))
return value
class NextUrlField(forms.CharField):
def __init__(self):
super(
NextUrlField,
self
).__init__(
max_length = 255,
widget = forms.HiddenInput(),
required = False
)
def clean(self,value):
return clean_next(value)
login_form_widget_attrs = { 'class': 'required login' }
class UserNameField(StrippedNonEmptyCharField):
RESERVED_NAMES = (u'fuck', u'shit', u'ass', u'sex', u'add',
u'edit', u'save', u'delete', u'manage', u'update', 'remove', 'new')
def __init__(
self,
db_model=User,
db_field='username',
must_exist=False,
skip_clean=False,
label=_('choose a username'),
**kw
):
self.must_exist = must_exist
self.skip_clean = skip_clean
self.db_model = db_model
self.db_field = db_field
self.user_instance = None
error_messages={
'required': _('user name is required'),
'taken': _('sorry, this name is taken, please choose another'),
'forbidden': _('sorry, this name is not allowed, please choose another'),
'missing': _('sorry, there is no user with this name'),
'multiple-taken': _('sorry, we have a serious error - user name is taken by several users'),
'invalid': _('user name can only consist of letters, empty space and underscore'),
'meaningless': _('please use at least some alphabetic characters in the user name'),
}
if 'error_messages' in kw:
error_messages.update(kw['error_messages'])
del kw['error_messages']
super(UserNameField,self).__init__(max_length=30,
widget=forms.TextInput(attrs=login_form_widget_attrs),
label=label,
error_messages=error_messages,
**kw
)
def clean(self,username):
""" validate username """
        if self.skip_clean:
logging.debug('username accepted with no validation')
return username
if self.user_instance is None:
pass
elif isinstance(self.user_instance, User):
if username == self.user_instance.username:
logging.debug('username valid')
return username
else:
raise TypeError('user instance must be of type User')
try:
username = super(UserNameField, self).clean(username)
except forms.ValidationError:
raise forms.ValidationError(self.error_messages['required'])
username_regex = re.compile(const.USERNAME_REGEX_STRING, re.UNICODE)
if self.required and not username_regex.search(username):
raise forms.ValidationError(self.error_messages['invalid'])
if username in self.RESERVED_NAMES:
raise forms.ValidationError(self.error_messages['forbidden'])
if slugify(username, force_unidecode = True) == '':
raise forms.ValidationError(self.error_messages['meaningless'])
try:
user = self.db_model.objects.get(
**{'%s' % self.db_field : username}
)
if user:
if self.must_exist:
logging.debug('user exists and name accepted b/c here we validate existing user')
return username
else:
raise forms.ValidationError(self.error_messages['taken'])
except self.db_model.DoesNotExist:
if self.must_exist:
logging.debug('user must exist, so raising the error')
raise forms.ValidationError(self.error_messages['missing'])
else:
logging.debug('user name valid!')
return username
except self.db_model.MultipleObjectsReturned:
logging.debug('error - user with this name already exists')
raise forms.ValidationError(self.error_messages['multiple-taken'])
class UserEmailField(forms.EmailField):
def __init__(self,skip_clean=False,**kw):
self.skip_clean = skip_clean
super(UserEmailField,self).__init__(widget=forms.TextInput(attrs=dict(login_form_widget_attrs,
maxlength=200)), label=mark_safe(_('your email address')),
error_messages={'required':_('email address is required'),
'invalid':_('please enter a valid email address'),
'taken':_('this email is already used by someone else, please choose another'),
},
**kw
)
def clean(self,email):
""" validate if email exist in database
from legacy register
return: raise error if it exist """
email = super(UserEmailField,self).clean(email.strip())
if self.skip_clean:
return email
        if askbot_settings.EMAIL_UNIQUE:
try:
user = User.objects.get(email = email)
logging.debug('email taken')
raise forms.ValidationError(self.error_messages['taken'])
except User.DoesNotExist:
logging.debug('email valid')
return email
except User.MultipleObjectsReturned:
logging.debug('email taken many times over')
raise forms.ValidationError(self.error_messages['taken'])
else:
return email
class SetPasswordForm(forms.Form):
password1 = forms.CharField(widget=forms.PasswordInput(attrs=login_form_widget_attrs),
label=_('choose password'),
error_messages={'required':_('password is required')},
)
password2 = forms.CharField(widget=forms.PasswordInput(attrs=login_form_widget_attrs),
label=mark_safe(_('retype password')),
error_messages={'required':_('please, retype your password'),
'nomatch':_('sorry, entered passwords did not match, please try again')},
)
def __init__(self, data=None, user=None, *args, **kwargs):
super(SetPasswordForm, self).__init__(data, *args, **kwargs)
def clean_password2(self):
"""
Validates that the two password inputs match.
"""
if 'password1' in self.cleaned_data:
if self.cleaned_data['password1'] == self.cleaned_data['password2']:
self.password = self.cleaned_data['password2']
self.cleaned_data['password'] = self.cleaned_data['password2']
return self.cleaned_data['password2']
else:
del self.cleaned_data['password2']
raise forms.ValidationError(self.fields['password2'].error_messages['nomatch'])
else:
return self.cleaned_data['password2']
| gpl-3.0 | -7,291,696,706,646,717,000 | 40.591837 | 121 | 0.57765 | false |
e-baumer/sampling | sampling/stratified_rand.py | 1 | 5349 | from __future__ import division
from collections import defaultdict
import numpy as np
from base_sample import BaseSample
from sklearn.cluster import AffinityPropagation as AP
import pandas as pd
from collections import Counter
class StratifiedRandom(BaseSample):
def __init__(self, data_frame, number_arms=2):
super(StratifiedRandom, self).__init__(data_frame, number_arms)
def create_stratum(self, column_names, **kwargs):
'''
Use affinity propagation to find number of strata for each column.
column_names is a list of the covariates to be split into strata and
        used for classification. This function adds a column to the data frame
for each column as column_name_strata that gives the strata designation
for that variable. The whole data frame is returned.
'''
for colname in column_names:
X = self.data[colname].reshape(-1, 1)
if np.isnan(X).any():
raise ValueError("There are NaN values in self.data[%s] that the \
clustering algorithm can't handle" % colname)
elif np.unique(self.data[colname]).shape[0] <=2:
string_name = colname+'_strata'
self.data[string_name] = self.data[colname].astype(int)
else:
                af_model = AP(damping=0.9, **kwargs)  # forward caller kwargs (e.g. preference) to AffinityPropagation
strata_groups = af_model.fit(X)
#cluster_centers_indices = af.cluster_centers_indices_
#n_clusters_ = len(cluster_centers_indices)
string_name = colname+'_strata'
self.data[string_name] = strata_groups.labels_
return self.data
#In the main function, you need to call create_stratum before create_unique_strata
def create_unique_strata(self, column_names):
'''
The input should be self.data that has had the strata for each column
name assigned and had a pre-seeded randomization, meaning each arm
has at least one randomly assigned participant.
'''
#Create a column to store concatenated strata strings for each data point
self.data['strata_string'] = np.ones(len(self.data))*np.nan
#Initialize variables to be filled in during the loop
strata_unique = {}
#Loop through data points and create their strata strings
for ind in self.data.index.values:
similar_val = ''
for colname in column_names:
string_name = colname+'_strata'
similar_val += str(self.data[string_name].loc[ind])
#Add the total strata string for that data point
self.data['strata_string'].set_value(ind,similar_val)
#If the strata string exists, continue. If not, assign it a new value
if similar_val in list(strata_unique.keys()):
strata_unique[similar_val].append(ind)
continue
else:
strata_unique[similar_val] = [ind]
return (strata_unique, self.data)
def count_arm_assignments(self, strata_unique, key):
'''
For each unique strata, count how many are assigned to each arm.
'''
#Initialize arm_tally that is the same length as the number of arms
arm_tally = np.zeros(self.n_arms)
#Loop through the values in the unique strata and count how many are in each arm
for value in strata_unique[key]:
#If it is not NaN, add one to the arm_tally for the data point's arm assignment
if np.isnan(self.data['arm_assignment'][value]) == False:
                arm_tally[int(self.data['arm_assignment'][value]-1)] += 1
return arm_tally
def assign_arms(self, column_names, percent_nan = 0.05):
'''
Loop through unique strata and assign each data point to an arm.
'''
#clear all values with NaNs
self.data = self.nan_finder(column_names, percent_nan)
#call create_stratum to create strata for each chosen covariate
self.data = self.create_stratum(column_names,preference=-50)
#combine the covariate strata into a unique strata identifier
(strata_unique, self.data) = self.create_unique_strata(column_names)
#initiate an empty column in the data frame for arm assignments
self.data['arm_assignment'] = np.ones(len(self.data))*np.nan
        #Loop through the unique strata
for key in strata_unique.keys():
#Loop through the values in the unique stratum
for value in strata_unique[key]:
#update the arm_tally based on new assignments
                arm_tally = self.count_arm_assignments(strata_unique, key)
ind_unique = np.where(arm_tally==np.min(arm_tally))[0]
self.data['arm_assignment'].set_value(
value, np.random.choice(list(ind_unique+1)
))
return self.data
#
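# Minimal usage sketch (data and column names are illustrative):
#   df = pd.DataFrame({'age': [...], 'score': [...]})
#   sampler = StratifiedRandom(df, number_arms=2)
#   df = sampler.assign_arms(['age', 'score'])
#   df['arm_assignment']  # 1-based arm labels, balanced within each stratum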
| apache-2.0 | 4,018,266,252,111,023,600 | 39.522727 | 93 | 0.5745 | false |
brokenseal/broke | examples/django/broke/blog/views.py | 1 | 2027 | import simplejson as json
from django.core import serializers
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.contrib.contenttypes.models import ContentType
from django.utils.html import strip_tags
from models import Entry
from forms import EntryForm
def save_entry(request):
if not request.is_ajax() or not request.method == 'POST':
raise Http404
form= EntryForm(request.POST)
if not form.is_valid():
return HttpResponse('{}', mimetype='application/javascript')
if 'pk' in request.POST:
entry= get_object_or_404(Entry, pk= request.POST['pk'])
form= EntryForm(request.POST, instance= entry)
entry= form.save(commit= False)
entry.body = strip_tags(entry.body)
entry.title = strip_tags(entry.title)
entry.save()
else:
entry= form.save(commit= False)
entry.body = strip_tags(entry.body)
entry.title = strip_tags(entry.title)
entry.save()
entry_content_type= ContentType.objects.get_for_model(entry.__class__)
response_data= json.dumps({
'pk': entry.pk,
'model': '%s.%s' % (entry_content_type.app_label, entry_content_type.model),
})
return HttpResponse(response_data, mimetype='application/javascript')
def delete_entry(request):
    if not (request.is_ajax() and request.method == 'POST' and 'pk' in request.POST):
raise Http404
entry= get_object_or_404(Entry, pk= request.POST['pk'])
entry.delete()
response_data= json.dumps({
'operation': 'complete',
})
return HttpResponse(response_data, mimetype='application/javascript')
def get_data(request):
if not request.is_ajax():
raise Http404
entries= Entry.objects.all()
if len(request.GET):
params_dict= {}
        for param, value in request.GET.iteritems():
            params_dict[str(param)] = str(value)
entries= entries.filter(**params_dict)
return HttpResponse(serializers.serialize("json", entries), mimetype='application/javascript')
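# Example (illustrative): an AJAX GET such as /blog/data/?pk=3 becomes
# Entry.objects.filter(pk='3') and the response is Django's JSON
# serialization, e.g. [{"pk": 3, "model": "blog.entry", "fields": {...}}].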
| bsd-3-clause | -219,378,214,440,639,200 | 25.767123 | 95 | 0.696103 | false |
miguelgrinberg/python-socketio | tests/asyncio/test_asyncio_namespace.py | 1 | 10400 | import asyncio
import sys
import unittest
from unittest import mock
from socketio import asyncio_namespace
def AsyncMock(*args, **kwargs):
"""Return a mock asynchronous function."""
m = mock.MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
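# For example (illustrative):
#   m = AsyncMock(return_value=42)
#   await m(1, a='b')                    # -> 42
#   m.mock.assert_called_with(1, a='b')  # the inner MagicMock records calls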
def _run(coro):
"""Run the given coroutine."""
return asyncio.get_event_loop().run_until_complete(coro)
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class TestAsyncNamespace(unittest.TestCase):
def test_connect_event(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncNamespace):
async def on_connect(self, sid, environ):
result['result'] = (sid, environ)
ns = MyNamespace('/foo')
ns._set_server(mock.MagicMock())
_run(ns.trigger_event('connect', 'sid', {'foo': 'bar'}))
assert result['result'] == ('sid', {'foo': 'bar'})
def test_disconnect_event(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncNamespace):
async def on_disconnect(self, sid):
result['result'] = sid
ns = MyNamespace('/foo')
ns._set_server(mock.MagicMock())
_run(ns.trigger_event('disconnect', 'sid'))
assert result['result'] == 'sid'
def test_sync_event(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncNamespace):
def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_server(mock.MagicMock())
_run(ns.trigger_event('custom_message', 'sid', {'data': 'data'}))
assert result['result'] == ('sid', {'data': 'data'})
def test_async_event(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncNamespace):
async def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_server(mock.MagicMock())
_run(ns.trigger_event('custom_message', 'sid', {'data': 'data'}))
assert result['result'] == ('sid', {'data': 'data'})
def test_event_not_found(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncNamespace):
async def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_server(mock.MagicMock())
_run(
ns.trigger_event('another_custom_message', 'sid', {'data': 'data'})
)
assert result == {}
def test_emit(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
mock_server = mock.MagicMock()
mock_server.emit = AsyncMock()
ns._set_server(mock_server)
_run(
ns.emit(
'ev', data='data', room='room', skip_sid='skip', callback='cb'
)
)
ns.server.emit.mock.assert_called_with(
'ev',
data='data',
room='room',
skip_sid='skip',
namespace='/foo',
callback='cb',
)
_run(
ns.emit(
'ev',
data='data',
room='room',
skip_sid='skip',
namespace='/bar',
callback='cb',
)
)
ns.server.emit.mock.assert_called_with(
'ev',
data='data',
room='room',
skip_sid='skip',
namespace='/bar',
callback='cb',
)
def test_send(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
mock_server = mock.MagicMock()
mock_server.send = AsyncMock()
ns._set_server(mock_server)
_run(ns.send(data='data', room='room', skip_sid='skip', callback='cb'))
ns.server.send.mock.assert_called_with(
'data',
room='room',
skip_sid='skip',
namespace='/foo',
callback='cb',
)
_run(
ns.send(
data='data',
room='room',
skip_sid='skip',
namespace='/bar',
callback='cb',
)
)
ns.server.send.mock.assert_called_with(
'data',
room='room',
skip_sid='skip',
namespace='/bar',
callback='cb',
)
def test_enter_room(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
ns._set_server(mock.MagicMock())
ns.enter_room('sid', 'room')
ns.server.enter_room.assert_called_with(
'sid', 'room', namespace='/foo'
)
ns.enter_room('sid', 'room', namespace='/bar')
ns.server.enter_room.assert_called_with(
'sid', 'room', namespace='/bar'
)
def test_leave_room(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
ns._set_server(mock.MagicMock())
ns.leave_room('sid', 'room')
ns.server.leave_room.assert_called_with(
'sid', 'room', namespace='/foo'
)
ns.leave_room('sid', 'room', namespace='/bar')
ns.server.leave_room.assert_called_with(
'sid', 'room', namespace='/bar'
)
def test_close_room(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
mock_server = mock.MagicMock()
mock_server.close_room = AsyncMock()
ns._set_server(mock_server)
_run(ns.close_room('room'))
ns.server.close_room.mock.assert_called_with('room', namespace='/foo')
_run(ns.close_room('room', namespace='/bar'))
ns.server.close_room.mock.assert_called_with('room', namespace='/bar')
def test_rooms(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
ns._set_server(mock.MagicMock())
ns.rooms('sid')
ns.server.rooms.assert_called_with('sid', namespace='/foo')
ns.rooms('sid', namespace='/bar')
ns.server.rooms.assert_called_with('sid', namespace='/bar')
def test_session(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
mock_server = mock.MagicMock()
mock_server.get_session = AsyncMock()
mock_server.save_session = AsyncMock()
ns._set_server(mock_server)
_run(ns.get_session('sid'))
ns.server.get_session.mock.assert_called_with('sid', namespace='/foo')
_run(ns.get_session('sid', namespace='/bar'))
ns.server.get_session.mock.assert_called_with('sid', namespace='/bar')
_run(ns.save_session('sid', {'a': 'b'}))
ns.server.save_session.mock.assert_called_with(
'sid', {'a': 'b'}, namespace='/foo'
)
_run(ns.save_session('sid', {'a': 'b'}, namespace='/bar'))
ns.server.save_session.mock.assert_called_with(
'sid', {'a': 'b'}, namespace='/bar'
)
ns.session('sid')
ns.server.session.assert_called_with('sid', namespace='/foo')
ns.session('sid', namespace='/bar')
ns.server.session.assert_called_with('sid', namespace='/bar')
def test_disconnect(self):
ns = asyncio_namespace.AsyncNamespace('/foo')
mock_server = mock.MagicMock()
mock_server.disconnect = AsyncMock()
ns._set_server(mock_server)
_run(ns.disconnect('sid'))
ns.server.disconnect.mock.assert_called_with('sid', namespace='/foo')
_run(ns.disconnect('sid', namespace='/bar'))
ns.server.disconnect.mock.assert_called_with('sid', namespace='/bar')
def test_sync_event_client(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncClientNamespace):
def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_client(mock.MagicMock())
_run(ns.trigger_event('custom_message', 'sid', {'data': 'data'}))
assert result['result'] == ('sid', {'data': 'data'})
def test_async_event_client(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncClientNamespace):
async def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_client(mock.MagicMock())
_run(ns.trigger_event('custom_message', 'sid', {'data': 'data'}))
assert result['result'] == ('sid', {'data': 'data'})
def test_event_not_found_client(self):
result = {}
class MyNamespace(asyncio_namespace.AsyncClientNamespace):
async def on_custom_message(self, sid, data):
result['result'] = (sid, data)
ns = MyNamespace('/foo')
ns._set_client(mock.MagicMock())
_run(
ns.trigger_event('another_custom_message', 'sid', {'data': 'data'})
)
assert result == {}
def test_emit_client(self):
ns = asyncio_namespace.AsyncClientNamespace('/foo')
mock_client = mock.MagicMock()
mock_client.emit = AsyncMock()
ns._set_client(mock_client)
_run(ns.emit('ev', data='data', callback='cb'))
ns.client.emit.mock.assert_called_with(
'ev', data='data', namespace='/foo', callback='cb'
)
_run(ns.emit('ev', data='data', namespace='/bar', callback='cb'))
ns.client.emit.mock.assert_called_with(
'ev', data='data', namespace='/bar', callback='cb'
)
def test_send_client(self):
ns = asyncio_namespace.AsyncClientNamespace('/foo')
mock_client = mock.MagicMock()
mock_client.send = AsyncMock()
ns._set_client(mock_client)
_run(ns.send(data='data', callback='cb'))
ns.client.send.mock.assert_called_with(
'data', namespace='/foo', callback='cb'
)
_run(ns.send(data='data', namespace='/bar', callback='cb'))
ns.client.send.mock.assert_called_with(
'data', namespace='/bar', callback='cb'
)
def test_disconnect_client(self):
ns = asyncio_namespace.AsyncClientNamespace('/foo')
mock_client = mock.MagicMock()
mock_client.disconnect = AsyncMock()
ns._set_client(mock_client)
_run(ns.disconnect())
ns.client.disconnect.mock.assert_called_with()
| mit | -7,737,561,823,440,931,000 | 33.323432 | 79 | 0.546731 | false |
appleseedhq/cortex | python/IECore/RelativePreset.py | 2 | 22977 | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import os
import re
## Implements a Preset that represents changes between two Parameter objects.
# The comparison of elements in a ClassVectorParameter takes into consideration both the parameter name and
# the loaded class name in order to decide whether two elements are the "same". We do that to try to work around the fact
# that the parameter names ("p0", "p1", etc.) are very simple and easy to reappear after a sequence of removal/addition
# operations in a ClassVectorParameter. The method is not 100% safe but should work for most cases.
# \todo Consider adding a protected member that is responsible for that comparison and enable derived classes to
# do other kinds of comparisons, for example, using additional parameters such as user labels.
#
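# A minimal sketch of the intended round trip (object names are illustrative):
#
#   preset = IECore.RelativePreset( editedOp.parameters(), originalOp.parameters() )
#   if preset.applicableTo( otherOp, otherOp.parameters() ) :
#       preset( otherOp, otherOp.parameters() )   # re-applies just the differences
#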
class RelativePreset( IECore.Preset ) :
## \param currParameter, IECore.Parameter, represents the parameter state after all changes have been made.
## \param oldParameter, IECore.Parameter, represents the parameter state before any changes.
## \param compareFilter, callable function that receives currParameter and oldParameter child and it should
## return a boolean to indicate if the difference should be computed or not.
def __init__( self, currParameter=None, oldParameter=None, compareFilter = None ) :
IECore.Preset.__init__( self )
self.__data = IECore.CompoundObject()
if compareFilter is None :
self.__compareFilter = lambda x,y: True
else :
self.__compareFilter = compareFilter
# accepts no parameters at all.
if currParameter is None and oldParameter is None :
return
if not isinstance( currParameter, IECore.Parameter ) :
raise TypeError, "Parameter currParameter must be a IECore.Parameter object!"
if not oldParameter is None :
if not isinstance( oldParameter, IECore.Parameter ) :
raise TypeError, "Parameter oldParameter must be a IECore.Parameter object!"
if currParameter.typeId() != oldParameter.typeId() :
raise TypeError, "Mismatching types for currParameter and oldParameter!"
self.__grabParameterChanges( currParameter, oldParameter, self.__data )
## \see IECore.Preset.applicableTo
def applicableTo( self, parameterised, rootParameter ) :
return RelativePreset.__applicableTo( rootParameter, self.__data )
def getDiffData( self ):
"""Returns a IECore.CompoundObject instance that contains the description of all the differences between the two parameters provided when creating this preset."""
return self.__data.copy()
def setDiffData( self, data ):
"""Use this function to recreate a RelativePreset from data previously returned by getDiffData()."""
if not isinstance( data, IECore.CompoundObject ):
raise TypeError, "Invalid data type! Must be a IECore.CompoundObject"
self.__data = data.copy()
## \see IECore.Preset.__call__
def __call__( self, parameterised, rootParameter ) :
if not self.applicableTo( parameterised, rootParameter ) :
raise RuntimeError, "Sorry, this preset is not applicable to the given parameter."
if len( self.__data ) :
self.__applyParameterChanges( rootParameter, self.__data )
def __grabParameterChanges( self, currParameter, oldParameter, data, paramPath = "" ) :
if not oldParameter is None:
if currParameter.staticTypeId() != oldParameter.staticTypeId() :
raise Exception, "Incompatible parameter %s!" % paramPath
if not self.__compareFilter( currParameter, oldParameter ) :
return
if isinstance( currParameter, IECore.ClassParameter ) :
self.__grabClassParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.ClassVectorParameter ) :
self.__grabClassVectorParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.CompoundParameter ) :
self.__grabCompoundParameterChanges( currParameter, oldParameter, data, paramPath )
else :
self.__grabSimpleParameterChanges( currParameter, oldParameter, data, paramPath )
def __grabCompoundParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
for p in currParameter.keys() :
newData = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None :
if p in oldParameter.keys() :
childOldParam = oldParameter[p]
self.__grabParameterChanges(
currParameter[p],
childOldParam,
newData,
paramPath + "." + p
)
if len(newData) :
data[p] = newData
if len(data):
data["_type_"] = IECore.StringData( "CompoundParameter" )
def __grabSimpleParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
if not oldParameter is None :
if currParameter.getValue() == oldParameter.getValue() :
return
data["_type_"] = IECore.StringData( currParameter.typeName() )
data["_value_"] = currParameter.getValue().copy()
def __grabClassParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
c = currParameter.getClass( True )
className = c[1]
classVersion = c[2]
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
oldClassName = None
oldClassVersion = None
childOldParam = None
if not oldParameter is None :
oldClass = oldParameter.getClass( True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
if oldClass[0] :
childOldParam = oldClass[0].parameters()
classValue = IECore.CompoundObject()
if c[0] :
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
classValue,
paramPath
)
if len(classValue):
data["_classValue_"] = classValue
if len(data) or className != oldClassName or classVersion != oldClassVersion :
data["_className_"] = IECore.StringData(className)
data["_classVersion_"] = IECore.IntData(classVersion)
data["_classNameFilter_"] = IECore.StringData(classNameFilter)
data["_type_"] = IECore.StringData( "ClassParameter" )
def __grabClassVectorParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
classes = currParameter.getClasses( True )
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
classNameFilter = IECore.StringData( classNameFilter )
classNames = IECore.StringVectorData()
classVersions = IECore.IntVectorData()
classOrder = IECore.StringVectorData()
values = IECore.CompoundObject()
for c in classes:
pName = c[1]
classOrder.append( pName )
classNames.append( c[2] )
classVersions.append( c[3] )
v = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None and pName in oldParameter.keys() :
oldClass = oldParameter.getClass( pName )
if oldClass :
childOldParam = oldClass.parameters()
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
v,
paramPath + "." + pName
)
if len(v) :
values[c[1]] = v
removedParams = []
if not oldParameter is None :
removedParams = list( set( oldParameter.keys() ).difference( classOrder ) )
if removedParams :
data["_removedParamNames_"] = IECore.StringVectorData( removedParams )
data["_removedClassNames_"] = IECore.StringVectorData()
for pName in removedParams :
oldClass = oldParameter.getClass( pName, True )
data["_removedClassNames_"].append( oldClass[1] )
modifiedParams = IECore.StringVectorData()
modifiedClassNames = IECore.StringVectorData()
modifiedClassVersions = IECore.IntVectorData()
addedParam = IECore.BoolVectorData()
for i in xrange(0,len(classOrder)):
pName = classOrder[i]
cName = classNames[i]
cVersion = classVersions[i]
oldClassName = None
oldClassVersion = None
if not oldParameter is None :
try:
oldClass = oldParameter.getClass( pName, True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
except Exception, e:
# added parameter...
pass
if cName != oldClassName or cVersion != oldClassVersion :
modifiedParams.append( pName )
modifiedClassNames.append( cName )
modifiedClassVersions.append( cVersion )
added = (oldClassName is None)
# if we are changing the class type, we have to mark as if we
# were removing it too
if cName != oldClassName and not oldClassName is None:
if not "_removedParamNames_" in data :
data["_removedParamNames_"] = IECore.StringVectorData()
data["_removedClassNames_"] = IECore.StringVectorData()
data["_removedParamNames_"].append(pName)
data["_removedClassNames_"].append(oldClassName)
removedParams.append(pName)
added = True
addedParam.append( added )
if len(modifiedParams) :
data["_modifiedParamsNames_"] = modifiedParams
data["_modifiedClassNames_"] = modifiedClassNames
data["_modifiedClassVersions_"] = modifiedClassVersions
data["_addedParam_"] = addedParam
# get all non-new parameters
parameterOrder = filter( lambda n: not n in modifiedParams or not addedParam[ modifiedParams.index(n) ], classOrder )
baseOrder = parameterOrder
if not oldParameter is None :
# get all non-deleted original parameters
baseOrder = filter( lambda n: not n in removedParams, oldParameter.keys() )
if baseOrder != parameterOrder :
if len(baseOrder) != len(parameterOrder):
raise Exception, "Unnexpected error. Unmatching parameter lists!"
# clamp to the smallest list containing the differences
for start in xrange(0,len(baseOrder)):
if baseOrder[start] != parameterOrder[start] :
break
for endPos in xrange(len(baseOrder),0,-1):
if baseOrder[endPos-1] != parameterOrder[endPos-1] :
break
data["_modifiedOrder_"] = IECore.StringVectorData( parameterOrder[start:endPos] )
if len(values):
# keep the original classes to which the parameters were edited
for pName in values.keys() :
values[pName]["_class_"] = IECore.StringData( classNames[classOrder.index(pName)] )
data["_values_"] = values
if len(data):
data["_classNameFilter_" ] = classNameFilter
data["_type_"] = IECore.StringData( "ClassVectorParameter" )
data["_paramNames_"] = classOrder
data["_classNames_"] = classNames
@staticmethod
def __applyParameterChanges( parameter, data, paramPath = "" ) :
if isinstance( parameter, IECore.ClassParameter ) :
RelativePreset.__applyClassParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.ClassVectorParameter ) :
RelativePreset.__applyClassVectorChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.CompoundParameter ) :
RelativePreset.__applyCompoundParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.Parameter ) :
RelativePreset.__applySimpleParameterChanges( parameter, data, paramPath )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unrecognized type (%s) for parameter %s. Not affected by preset." % ( parameter.typeName(), parameter.name )
)
@staticmethod
def __applyCompoundParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "CompoundParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found CompoundParameter."
% ( paramPath, data["_type_"].value )
)
return
for p in data.keys() :
if p in [ "_type_", "_class_" ] :
continue
if paramPath :
newParamPath = paramPath + "." + p
else :
newParamPath = p
if p not in parameter :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Could not find parameter '%s'. Preset value ignored." % newParamPath
)
continue
RelativePreset.__applyParameterChanges( parameter[p], data[p], newParamPath )
@staticmethod
def __applySimpleParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != parameter.typeName() :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found %s."
% ( paramPath, data["_type_"].value, parameter.typeName() )
)
return
try:
parameter.setValue( data["_value_"] )
except Exception, e:
IECore.msg( IECore.Msg.Level.Warning, "IECore.RelativePreset", str(e) )
@staticmethod
def __applyClassParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassParameter."
% ( paramPath, data["_type_"].value )
)
return
c = parameter.getClass( True )
className = data["_className_"].value
classVersion = data["_classVersion_"].value
if c[1] != className or c[2] != classVersion :
parameter.setClass( className, classVersion )
c = parameter.getClass( False )
if c and '_classValue_' in data :
RelativePreset.__applyParameterChanges( c.parameters(), data["_classValue_"], paramPath )
@staticmethod
def __applyClassVectorChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassVectorParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassVectorParameter."
% ( paramPath, data["_type_"].value )
)
return
# remove parameters if they match in parameter name and class name
if "_removedParamNames_" in data :
for (i,pName) in enumerate( data["_removedParamNames_"] ):
if pName in parameter.keys() :
c = parameter.getClass( pName, True )
if c and c[1] == data["_removedClassNames_"][i] :
parameter.removeClass( pName )
paramRemaps = {}
if "_modifiedParamsNames_" in data :
modifiedParams = data["_modifiedParamsNames_"]
modifiedClassNames = data["_modifiedClassNames_"]
modifiedClassVersions = data["_modifiedClassVersions_"]
addedParam = data["_addedParam_"]
addedCount = 0
# first modify items
for i in range( len( modifiedClassNames ) ) :
if addedParam[i] :
addedCount += 1
else :
# must find an existing matching parameter, no matter what
if modifiedParams[i] in parameter:
c = parameter.getClass( modifiedParams[i], True )
if modifiedClassNames[i] == c[1] :
if modifiedClassVersions[i] != c[2] :
parameter.setClass( modifiedParams[i], modifiedClassNames[i], modifiedClassVersions[i] )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Parameter '%s.%s' has a different class. Expected %s but found %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], modifiedClassNames[i], c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], parameter.name )
)
# get a list of classes before the addition of new items
newOrder = False
newClassList = map( lambda c: c[1:], parameter.getClasses( True ) )
newParamList = map( lambda c: c[0], newClassList )
# compare each class with whatever existed when we created the RelativePreset and see which ones are the same
sameClasses = set()
for c in newClassList :
if '_modifiedParamsNames_' in data :
# If the preset has added this parameter it should not match current parameters in the vector, no matter if the class matches. Is it always the case?
if c[0] in data['_modifiedParamsNames_'] :
if data['_addedParam_'][ data['_modifiedParamsNames_'].index(c[0]) ] :
continue
try :
i = data['_paramNames_'].index(c[0])
            except ValueError :
continue
if c[1] == data['_classNames_'][i] :
sameClasses.add( c[0] )
if "_modifiedOrder_" in data :
# there was some kind of change in the order of parameters as well...
modifiedOrder = filter( lambda pName: pName in sameClasses, data["_modifiedOrder_"] )
# find the range of parameters that lie between the reordered parameters in the current vector
firstParam = None
lastParam = None
for (i,pName) in enumerate(newParamList) :
if pName in modifiedOrder :
if firstParam is None:
firstParam = i
lastParam = i
if firstParam != lastParam :
# adds one by one the unknown parameters that lied between the reordered parameters.
for pName in newParamList[firstParam:lastParam+1] :
if not pName in modifiedOrder :
modifiedOrder.insert( modifiedOrder.index(baseParam)+1, pName )
baseParam = pName
def classOrder( c1, c2 ):
# if both elements were on the original reordering operation we use their relationship
if c1[0] in modifiedOrder and c2[0] in modifiedOrder:
i1 = modifiedOrder.index( c1[0] )
i2 = modifiedOrder.index( c2[0] )
return cmp( i1, i2 )
# otherwise we use the current order.
i1 = newParamList.index( c1[0] )
i2 = newParamList.index( c2[0] )
return cmp( i1, i2 )
newClassList.sort( classOrder )
newParamList = map( lambda c: c[0], newClassList )
newOrder = True
if "_modifiedParamsNames_" in data :
# now add items to the appropriate spot in the newClassList and newParamList
if addedCount :
newOrder = True
prevActualParam = None
lastActualParamInsertion = None
currClasses = parameter.getClasses( True )
for pName in data["_paramNames_"] :
if pName in sameClasses :
if pName in newParamList :
prevActualParam = pName
continue
if pName in modifiedParams :
i = modifiedParams.index(pName)
if addedParam[ i ] :
if prevActualParam is None :
if lastActualParamInsertion is None :
# Here we assume that the new parameter should
# go to the top because its predecessors don't exist on the
# new vector. Maybe it could also print a warning message..
lastActualParamInsertion = 0
else :
lastActualParamInsertion += 1
else :
lastActualParamInsertion = newParamList.index( prevActualParam ) + 1
prevActualParam = None
if pName in parameter:
newParamName = parameter.newParameterName()
if not re.match("^p[0-9]+$", pName) :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Custom parameter %s.%s is being renamed to %s..."
% ( paramPath, pName, newParamName )
)
paramRemaps[ pName ] = newParamName
pName = newParamName
# add the parameter to the vector, so that next calls to parameter.newParameterName() will work.
parameter.setClass( pName, modifiedClassNames[i], modifiedClassVersions[i] )
# update our official new arrays
newParamList.insert(lastActualParamInsertion, pName)
newClassList.insert(lastActualParamInsertion, (pName,modifiedClassNames[i], modifiedClassVersions[i]) )
# update parameters with new order
if newOrder :
parameter.setClasses( newClassList )
if "_values_" in data :
for paramName in data["_values_"].keys() :
remapedParamName = paramRemaps.get( paramName, paramName )
presetValue = data["_values_"][paramName]
if remapedParamName in parameter.keys() :
c = parameter.getClass( remapedParamName, True )
if c[1] == presetValue["_class_"].value :
RelativePreset.__applyParameterChanges(
c[0].parameters(),
presetValue,
paramPath + "." + remapedParamName
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Ignoring preset values for parameter %s.%s. Expected class %s but found %s."
% ( paramPath, remapedParamName, presetValue["_class_"].value, c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring this preset changes."
% ( paramPath, remapedParamName, parameter.name )
)
@staticmethod
def __applicableTo( parameter, data ) :
if len(data) == 0 :
return True
if parameter.staticTypeId() == IECore.TypeId.CompoundParameter :
if data["_type_"].value != "CompoundParameter":
return False
elif isinstance( parameter, IECore.ClassParameter ) :
if data["_type_"].value != "ClassParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
elif isinstance( parameter, IECore.ClassVectorParameter ) :
if data["_type_"].value != "ClassVectorParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
else :
if data["_type_"].value != parameter.typeName():
return False
if not parameter.valueValid( data["_value_"] )[0]:
return False
return True
IECore.registerRunTimeTyped( RelativePreset )
| bsd-3-clause | -4,797,742,808,319,548,000 | 32.251809 | 164 | 0.689994 | false |
bally12345/enigma2 | lib/python/Components/Converter/ClockToText.py | 1 | 2991 | from Converter import Converter
from time import localtime, strftime
from Components.Element import cached
class ClockToText(Converter, object):
DEFAULT = 0
WITH_SECONDS = 1
IN_MINUTES = 2
DATE = 3
FORMAT = 4
AS_LENGTH = 5
TIMESTAMP = 6
FULL = 7
SHORT_DATE = 8
LONG_DATE = 9
VFD = 10
FULL_DATE = 11
# add: date, date as string, weekday, ...
# (whatever you need!)
def __init__(self, type):
Converter.__init__(self, type)
if type == "WithSeconds":
self.type = self.WITH_SECONDS
elif type == "InMinutes":
self.type = self.IN_MINUTES
elif type == "Date":
self.type = self.DATE
elif type == "AsLength":
self.type = self.AS_LENGTH
elif type == "Timestamp":
self.type = self.TIMESTAMP
elif type == "Full":
self.type = self.FULL
elif type == "ShortDate":
self.type = self.SHORT_DATE
elif type == "LongDate":
self.type = self.LONG_DATE
elif type == "FullDate":
self.type = self.FULL_DATE
elif type == "VFD":
self.type = self.VFD
elif "Format" in type:
self.type = self.FORMAT
self.fmt_string = type[7:]
else:
self.type = self.DEFAULT
@cached
def getText(self):
time = self.source.time
if time is None:
return ""
# handle durations
if self.type == self.IN_MINUTES:
return ngettext("%d Min", "%d Mins", (time / 60)) % (time / 60)
elif self.type == self.AS_LENGTH:
if time < 0:
return ""
return "%d:%02d" % (time / 60, time % 60)
elif self.type == self.TIMESTAMP:
return str(time)
t = localtime(time)
if self.type == self.WITH_SECONDS:
# TRANSLATORS: full time representation hour:minute:seconds
return _("%2d:%02d:%02d") % (t.tm_hour, t.tm_min, t.tm_sec)
elif self.type == self.DEFAULT:
# TRANSLATORS: short time representation hour:minute
return _("%2d:%02d") % (t.tm_hour, t.tm_min)
elif self.type == self.DATE:
# TRANSLATORS: full date representation dayname daynum monthname year in strftime() format! See 'man strftime'
d = _("%A %e %B %Y")
elif self.type == self.FULL:
# TRANSLATORS: long date representation short dayname daynum short monthname hour:minute in strftime() format! See 'man strftime'
d = _("%a %e/%m %-H:%M")
elif self.type == self.SHORT_DATE:
# TRANSLATORS: short date representation short dayname daynum short monthname in strftime() format! See 'man strftime'
d = _("%a %e/%m")
elif self.type == self.LONG_DATE:
# TRANSLATORS: long date representations dayname daynum monthname in strftime() format! See 'man strftime'
d = _("%A %e %B")
elif self.type == self.FULL_DATE:
# TRANSLATORS: full date representations sort dayname daynum monthname long year in strftime() format! See 'man strftime'
d = _("%a %e %B %Y")
elif self.type == self.VFD:
# TRANSLATORS: VFD hour:minute daynum short monthname in strftime() format! See 'man strftime'
d = _("%k:%M %e/%m")
elif self.type == self.FORMAT:
d = self.fmt_string
else:
return "???"
return strftime(d, t)
text = property(getText)
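# Example skin usage (illustrative): a converter argument such as
#   <convert type="ClockToText">Format:%H:%M</convert>
# renders e.g. "14:05", while type "InMinutes" renders a duration as "90 Mins".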
| gpl-2.0 | -1,581,685,355,648,117,200 | 29.520408 | 132 | 0.651287 | false |
octosend/octosend-docs | conf.py | 1 | 9945 | # -*- coding: utf-8 -*-
#
# Octosend Documentation documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 23 18:25:47 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# load PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# enable highlighting for PHP code not between <?php ... ?> by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
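# With this in place, reST sources can highlight bare PHP snippets, e.g.:
#
#   .. code-block:: php
#
#      $spooler = $client->spooler();
#
# ($client / $spooler are illustrative names, not part of this configuration)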
api_url = ''
# substitutions to add to every file
rst_epilog = """
.. _Octosend: https://octosend.com/
.. |api_current_version| replace:: v3.0
.. |api_current_url| replace:: https://api.octosend.com/api/3.0/
"""
'.. |api_url| replace:: %s' % api_url
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Octosend Documentation'
copyright = u'2015, Octosend Documentation Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OctosendDocumentationdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OctosendDocumentation.tex', u'Octosend Documentation Documentation',
u'Octosend Documentation Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'octosenddocumentation', u'Octosend Documentation Documentation',
[u'Octosend Documentation Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OctosendDocumentation', u'Octosend Documentation Documentation',
u'Octosend Documentation Team', 'OctosendDocumentation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Octosend Documentation'
epub_author = u'Octosend Documentation Team'
epub_publisher = u'Octosend Documentation Team'
epub_copyright = u'2015, Octosend Documentation Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| isc | 1,927,615,428,846,011,400 | 31.080645 | 95 | 0.708999 | false |
klahnakoski/MySQL-to-S3 | vendor/jx_sqlite/snowflake.py | 1 | 12852 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import OrderedDict
from copy import copy
from jx_base import STRUCT, OBJECT, EXISTS, STRING
from jx_base.container import Container
from jx_base.queries import get_property_name
from jx_python import jx
from jx_python.meta import Column
from jx_sqlite import typed_column, UID, quoted_UID, quoted_GUID, sql_types, quoted_PARENT, quoted_ORDER, GUID, untyped_column
from mo_dots import relative_field, listwrap, split_field, join_field, wrap, startswith_field, concat_field, Null, coalesce, set_default
from mo_future import text_type
from mo_logs import Log
from pyLibrary.sql import SQL_FROM, sql_iso, sql_list, SQL_LIMIT, SQL_SELECT, SQL_ZERO, SQL_STAR
from pyLibrary.sql.sqlite import quote_column
class Snowflake(object):
"""
MANAGE SQLITE DATABASE
"""
def __init__(self, fact, uid, db):
self.fact = fact # THE CENTRAL FACT TABLE
self.uid = uid
self.db = db
self._columns = [] # EVERY COLUMN IS ACCESSIBLE BY EVERY TABLE IN THE SNOWFLAKE
self.tables = OrderedDict() # MAP FROM NESTED PATH TO Table OBJECT, PARENTS PROCEED CHILDREN
if not self.read_db():
self.create_fact(uid)
def read_db(self):
"""
PULL SCHEMA FROM DATABASE, BUILD THE MODEL
:return: None
"""
# FIND ALL TABLES
result = self.db.query("SELECT * FROM sqlite_master WHERE type='table' ORDER BY name")
tables = wrap([{k: d[i] for i, k in enumerate(result.header)} for d in result.data])
tables_found = False
for table in tables:
if table.name.startswith("__"):
continue
tables_found = True
nested_path = [join_field(split_field(tab.name)[1:]) for tab in jx.reverse(tables) if startswith_field(table.name, tab.name)]
self.add_table_to_schema(nested_path)
# LOAD THE COLUMNS
command = "PRAGMA table_info"+sql_iso(quote_column(table.name))
details = self.db.query(command)
            for cid, name, dtype, notnull, dflt_value, pk in details.data:
if name.startswith("__"):
continue
cname, ctype = untyped_column(name)
column = Column(
names={np: relative_field(cname, np) for np in nested_path},
type=coalesce(ctype, {"TEXT": "string", "REAL": "number", "INTEGER": "integer"}.get(dtype)),
nested_path=nested_path,
es_column=name,
es_index=table.name
)
self.add_column_to_schema(column)
return tables_found
def create_fact(self, uid=UID):
"""
MAKE NEW TABLE WITH GIVEN guid
:param uid: name, or list of names, for the GUID
:return: None
"""
self.add_table_to_schema(["."])
uid = listwrap(uid)
new_columns = []
for u in uid:
if u == UID:
pass
else:
c = Column(
names={".": u},
type="string",
es_column=typed_column(u, "string"),
es_index=self.fact
)
self.add_column_to_schema(c)
new_columns.append(c)
command = (
"CREATE TABLE " + quote_column(self.fact) + sql_iso(sql_list(
[quoted_GUID + " TEXT "] +
[quoted_UID + " INTEGER"] +
[quote_column(c.es_column) + " " + sql_types[c.type] for c in self.tables["."].schema.columns] +
["PRIMARY KEY " + sql_iso(sql_list(
[quoted_GUID] +
[quoted_UID] +
[quote_column(c.es_column) for c in self.tables["."].schema.columns]
))]
))
)
self.db.execute(command)
def change_schema(self, required_changes):
"""
ACCEPT A LIST OF CHANGES
:param required_changes:
:return: None
"""
required_changes = wrap(required_changes)
for required_change in required_changes:
if required_change.add:
self._add_column(required_change.add)
elif required_change.nest:
column, cname = required_change.nest
self._nest_column(column, cname)
# REMOVE KNOWLEDGE OF PARENT COLUMNS (DONE AUTOMATICALLY)
                # PARENT COLUMNS ARE DROPPED IN _nest_column BELOW
def _add_column(self, column):
cname = column.names["."]
if column.type == "nested":
# WE ARE ALSO NESTING
self._nest_column(column, [cname]+column.nested_path)
table = concat_field(self.fact, column.nested_path[0])
self.db.execute(
"ALTER TABLE " + quote_column(table) +
" ADD COLUMN " + quote_column(column.es_column) + " " + sql_types[column.type]
)
self.add_column_to_schema(column)
def _nest_column(self, column, new_path):
destination_table = concat_field(self.fact, new_path[0])
existing_table = concat_field(self.fact, column.nested_path[0])
# FIND THE INNER COLUMNS WE WILL BE MOVING
moving_columns = []
for c in self._columns:
            if destination_table != column.es_index and column.es_column == c.es_column:
moving_columns.append(c)
c.nested_path = new_path
# TODO: IF THERE ARE CHILD TABLES, WE MUST UPDATE THEIR RELATIONS TOO?
# DEFINE A NEW TABLE?
# LOAD THE COLUMNS
command = "PRAGMA table_info"+sql_iso(quote_column(destination_table))
details = self.db.query(command)
if not details.data:
command = (
"CREATE TABLE " + quote_column(destination_table) + sql_iso(sql_list([
quoted_UID + "INTEGER",
quoted_PARENT + "INTEGER",
quoted_ORDER + "INTEGER",
"PRIMARY KEY " + sql_iso(quoted_UID),
"FOREIGN KEY " + sql_iso(quoted_PARENT) + " REFERENCES " + quote_column(existing_table) + sql_iso(quoted_UID)
]))
)
self.db.execute(command)
self.add_table_to_schema(new_path)
# TEST IF THERE IS ANY DATA IN THE NEW NESTED ARRAY
if not moving_columns:
return
column.es_index = destination_table
self.db.execute(
"ALTER TABLE " + quote_column(destination_table) +
" ADD COLUMN " + quote_column(column.es_column) + " " + sql_types[column.type]
)
# Deleting parent columns
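        # SQLITE (BEFORE VERSION 3.35) HAS NO "ALTER TABLE ... DROP COLUMN",
        # SO THE DROP IS EMULATED: RENAME THE TABLE, RECREATE IT WITHOUT THE
        # MOVED COLUMN, THEN DROP THE TEMPORARY COPY.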
for col in moving_columns:
column = col.es_column
tmp_table = "tmp_" + existing_table
columns = list(map(text_type, self.db.query(SQL_SELECT + SQL_STAR + SQL_FROM + quote_column(existing_table) + SQL_LIMIT + SQL_ZERO).header))
self.db.execute(
"ALTER TABLE " + quote_column(existing_table) +
" RENAME TO " + quote_column(tmp_table)
)
self.db.execute(
"CREATE TABLE " + quote_column(existing_table) + " AS " +
SQL_SELECT + sql_list([quote_column(c) for c in columns if c != column]) +
SQL_FROM + quote_column(tmp_table)
)
self.db.execute("DROP TABLE " + quote_column(tmp_table))
def add_table_to_schema(self, nested_path):
table = Table(nested_path)
self.tables[table.name] = table
path = table.name
for c in self._columns:
rel_name = c.names[path] = relative_field(c.names["."], path)
table.schema.add(rel_name, c)
return table
@property
def columns(self):
return self._columns
def add_column_to_schema(self, column):
self._columns.append(column)
abs_name = column.names["."]
for table in self.tables.values():
rel_name = column.names[table.name] = relative_field(abs_name, table.name)
table.schema.add(rel_name, column)
table.columns.append(column)
class Table(Container):
def __init__(self, nested_path):
self.nested_path = nested_path
self._schema = Schema(nested_path)
self.columns = [] # PLAIN DATABASE COLUMNS
@property
def name(self):
"""
:return: THE TABLE NAME RELATIVE TO THE FACT TABLE
"""
return self.nested_path[0]
@property
def schema(self):
return self._schema
class Schema(object):
"""
A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
"""
def __init__(self, nested_path):
if nested_path[-1] != '.':
Log.error("Expecting full nested path")
source = Column(
names={".": "."},
type=OBJECT,
es_column="_source",
es_index=nested_path,
nested_path=nested_path
)
guid = Column(
names={".": GUID},
type=STRING,
es_column=GUID,
es_index=nested_path,
nested_path=nested_path
)
self.namespace = {".": {source}, GUID: {guid}}
self._columns = [source, guid]
self.nested_path = nested_path
def add(self, column_name, column):
if column_name != column.names[self.nested_path[0]]:
Log.error("Logic error")
self._columns.append(column)
for np in self.nested_path:
rel_name = column.names[np]
container = self.namespace.setdefault(rel_name, set())
hidden = [
c
for c in container
if len(c.nested_path[0]) < len(np)
]
for h in hidden:
container.remove(h)
container.add(column)
container = self.namespace.setdefault(column.es_column, set())
container.add(column)
def remove(self, column_name, column):
if column_name != column.names[self.nested_path[0]]:
Log.error("Logic error")
self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]
def __getitem__(self, item):
output = self.namespace.get(item, Null)
return output
def __copy__(self):
output = Schema(self.nested_path)
for k, v in self.namespace.items():
output.namespace[k] = copy(v)
return output
def get_column_name(self, column):
"""
RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
:param column:
:return: NAME OF column
"""
return get_property_name(column.names[self.nested_path[0]])
def keys(self):
return set(self.namespace.keys())
def items(self):
return list(self.namespace.items())
@property
def columns(self):
return [c for c in self._columns if c.es_column not in [GUID, '_source']]
def leaves(self, prefix):
head = self.namespace.get(prefix, None)
if not head:
return Null
full_name = list(head)[0].names['.']
return set(
c
for k, cs in self.namespace.items()
if startswith_field(k, full_name) and k != GUID or k == full_name
for c in cs
if c.type not in [OBJECT, EXISTS]
)
def map_to_sql(self, var=""):
"""
RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
"""
origin = self.nested_path[0]
if startswith_field(var, origin) and origin != var:
var = relative_field(var, origin)
fact_dict = {}
origin_dict = {}
for k, cs in self.namespace.items():
for c in cs:
if c.type in STRUCT:
continue
if startswith_field(get_property_name(k), var):
origin_dict.setdefault(c.names[origin], []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(c.names["."], []).append(c)
elif origin == var:
origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(concat_field(var, c.names["."]), []).append(c)
return set_default(origin_dict, fact_dict)
| mpl-2.0 | 6,842,269,990,975,783,000 | 33.829268 | 152 | 0.549876 | false |
reiven/pungabot | modules/module_tell.py | 1 | 1418 | # -*- coding: utf-8 -*-
import string
from datetime import datetime
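# NOTE: dbCursor and getNick are used below without being imported; the bot
# core appears to inject them into each module's namespace at load time.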
def sanitize(buf):
return filter(lambda x: x in string.printable, buf)
def handle_userJoined(bot, user, channel):
"""Someone Joined, lets salute him"""
dbCursor.execute("SELECT * FROM tell WHERE tell_to = '%s' AND tell_channel = '%s'" % (getNick(user), channel ))
rows = dbCursor.fetchall()
for row in rows:
        bot.say(channel, '%s: %s left this message for you on %s at %s:' % (
getNick(user),
row[1].encode("utf-8"),
row[3].split()[0],
row[3].split()[1],
))
bot.say(channel, '"%s"' % row[4].encode("utf-8"))
dbCursor.execute("DELETE FROM tell WHERE tell_id = '%s'" % row[0])
def command_tell(bot, user, channel, args):
"""tell something to user when he/she rejoin the channel"""
if len(args.split()) >= 2:
tell_to, args = args.split(' ', 1)
dbCursor.execute("INSERT INTO tell VALUES (NULL, ?, ?, ?, ?, ?)", (
getNick(user),
unicode(tell_to, 'utf-8'),
datetime.now().strftime("%d-%m-%Y %H:%M"),
unicode(args, 'utf-8'),
channel
))
        return bot.say(channel, '%s, I will tell that to %s' % (getNick(user), unicode(tell_to, 'utf-8')))
    else:
        return bot.say(channel, '%s, for whom and what should I save the message?' % getNick(user))
| gpl-3.0 | 1,568,273,911,589,310,500 | 33.585366 | 115 | 0.550071 | false |
brahamcosoX3/TheIoTLearningInitiative | InternetOfThings101/main.py | 1 | 5409 | #!/usr/bin/python
# Libraries
import paho.mqtt.client as paho
import psutil
import pywapi
import signal
import sys
import time
import dweepy
import random
import plotly.plotly as py
import pyupm_i2clcd as lcd
import pyupm_grove as grove
from threading import Thread
from flask import Flask
from flask_restful import Api, Resource
from plotly.graph_objs import Scatter, Layout, Figure
from Adafruit_IO import MQTTClient
# Global variables
# Display config
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
myLcd.setColor(255, 255, 255)
# Light sensor config
light = grove.GroveLight(0)
# Relay
relay = grove.GroveRelay(4)
# Restful init
#app = Flask(__name__)
#api = Api(app)
# Adafruit variables
ADAFRUIT_IO_KEY = 'cd6bfee245bd4b2c9e14fe2eb882643a'
ADAFRUIT_IO_USERNAME = 'brahamcoso'
# Plotly variables
username = 'brahamcosoX3'
api_key = '2no5uo7af9'
stream_token = 'npg3mqqj85'
# Classes
class Network(Resource):
def get(self):
data = 'Network Data: %i' % dataNetwork()
return data
# Functions
def interruptHandler(signal, frame):
sys.exit(0)
def on_publish(mosq, obj, msg):
pass
def dataNetwork():
netdata = psutil.net_io_counters()
return netdata.packets_sent + netdata.packets_recv
def getMac(interface):
try:
mac = open('/sys/class/net/' + interface +
'/address').readline()
except:
mac = "00:00:00:00:00:00"
return mac[0:17]
def dataWeatherHandler():
weather = pywapi.get_weather_from_weather_com('MXJO0043', 'metric')
message = "Weather Report in " + weather['location']['name']
message = message + ", Temperature "
message = message + (weather['current_conditions']['temperature'] +
" C")
message = message + ", Atmospheric Pressure "
message = message + (weather['current_conditions']
['barometer']['reading'][:-3] + " mbar")
dataLcd = "%s-%s C, %s mbar" % ( weather['location']['name'],
weather['current_conditions']['temperature'],weather['current_conditions']['barometer']['reading'][:-3])
#print message
return dataLcd
def connected(client):
    print 'Connected to Adafruit IO! Listening for my-data feed changes...'
client.subscribe('my-data')
def disconnected(client):
print 'Disconnected from Adafruit IO!'
sys.exit(1)
def message(client, feed_id, payload):
print 'Feed {0} received new value: {1}'.format(feed_id, payload)
# Adafruit IO Thread
def dataAdafruitHandler():
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
client.connect()
client.loop_background()
while True:
value = random.randint(0, 100)
print 'Publishing {0} to my-data.'.format(value)
client.publish('my-data', value)
time.sleep(5)
# Network Thread
def dataNetworkHandler():
idDevice = "Charles: " + getMac("wlan0")
while True:
packets = dataNetwork()
message = idDevice + " " + str(packets)
#print "MQTT dataNetworkHandler " + message
mqttclient.publish("IoT101/Network", message)
time.sleep(2)
# Message Thread
def on_message(mosq, obj, msg):
print "MQTT dataMessageHandler %s %s" % (msg.topic, msg.payload)
if "78:4b:87:9f:39:35/Actuator" in msg.topic:
if msg.payload == '1':
relay.on()
elif msg.payload == '0':
relay.off()
def dataMessageHandler():
mqttclient.subscribe("IoT101/#", 0)
#mqttclient.subscribe("IoT101/78:4b:87:9f:39:35/Actuator", 0)
while mqttclient.loop() == 0:
pass
# Plotly Thread
def dataPlotly():
return dataNetwork()
def dataPlotlyHandler():
py.sign_in(username, api_key)
trace1 = Scatter(
x=[],
y=[],
stream = dict(
token = stream_token,
maxpoints = 200))
layout = Layout(
title='Hello Internet of Things 101 Data')
fig = Figure(data = [trace1], layout = layout)
print py.plot(fig, filename = 'Hello Internet of Things 101 Plotly', auto_open=False)
i = 0
stream = py.Stream(stream_token)
stream.open()
while True:
stream_data = dataPlotly()
stream.write({'x': i, 'y': stream_data})
i += 1
time.sleep(0.25)
# Light Thread
def dataLightHandler():
while True:
dweepy.dweet_for('brahamcosoIoT101',
{'value': str(light.value())})
time.sleep(2)
#api.add_resource(Network, '/network')
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
# Mosquitto config
mqttclient = paho.Client()
mqttclient.on_publish = on_publish
mqttclient.on_message = on_message
mqttclient.connect("test.mosquitto.org", 1883, 60)
# Run Restful site
#app.run(host='0.0.0.0', debug=True)
# Threads
threadv = Thread(target=dataAdafruitHandler)
threadv.start()
threadw = Thread(target=dataLightHandler)
threadw.start()
threadx = Thread(target=dataNetworkHandler)
threadx.start()
thready = Thread(target=dataMessageHandler)
thready.start()
threadz = Thread(target=dataPlotlyHandler)
threadz.start()
while True:
myLcd.setCursor(0, 0)
toString = dataWeatherHandler()
a,b = toString.split("-")
myLcd.write(str(a))
myLcd.setCursor(1, 0)
myLcd.write(str(b))
time.sleep(5)
# End of File
| apache-2.0 | -6,768,222,355,278,814,000 | 23.926267 | 104 | 0.648364 | false |
antonev/django-handlers | django_handlers.py | 1 | 6406 | from itertools import chain
from collections import (
defaultdict,
Iterable,
)
from django.http import HttpResponseNotAllowed
__version__ = '0.1.1'
class Handler(object):
"""Container for views.
:param decorators: (optional) list of decorators that will be applied
to each endpoint.
"""
def __init__(self, decorators=None):
self._decorators = decorators or []
self._views = defaultdict(dict)
self._pre_hooks = defaultdict(list)
self._post_hooks = defaultdict(list)
self._invalid_endpoint_names = dir(self)
def add_view(self, method, endpoint_name, view):
"""Adds a view to handler.
:param method: HTTP method to be handled by the view
:param endpoint_name: name of endpoint to associate the view with
:param view: function to be used for requests handling
"""
self._ensure_endpoint_exists(endpoint_name)
self._views[endpoint_name][method.upper()] = view
def _ensure_endpoint_exists(self, endpoint_name):
self._validate_endpoint_name(endpoint_name)
if endpoint_name not in self._views:
self._add_endpoint(endpoint_name)
def _validate_endpoint_name(self, endpoint_name):
if endpoint_name in self._invalid_endpoint_names:
raise ValueError('Invalid endpoint name {}'.format(endpoint_name))
def _add_endpoint(self, endpoint_name):
def endpoint(request, *args, **kwargs):
for hook in self._get_pre_hooks(endpoint_name):
hook(request, *args, **kwargs)
try:
view = self._views[endpoint_name][request.method]
except KeyError:
allowed_methods = self._views[endpoint_name].keys()
response = HttpResponseNotAllowed(allowed_methods)
else:
response = view(request, *args, **kwargs)
for hook in self._get_post_hooks(endpoint_name):
hook(request, *args, **kwargs)
return response
for decorator in reversed(self._decorators):
endpoint = decorator(endpoint)
setattr(self, endpoint_name, endpoint)
def _get_pre_hooks(self, endpoint_name):
return chain(self._pre_hooks[None], self._pre_hooks[endpoint_name])
def _get_post_hooks(self, endpoint_name):
return chain(self._post_hooks[None], self._post_hooks[endpoint_name])
def _register(self, method, endpoint_name):
def decorator(view):
self.add_view(method, endpoint_name, view)
return view
return decorator
def get(self, endpoint_name):
"""Decorates a view to use it for handling of GET requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('GET', endpoint_name)
def head(self, endpoint_name):
"""Decorates a view to use it for handling of HEAD requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('HEAD', endpoint_name)
def options(self, endpoint_name):
"""Decorates a view to use it for handling of OPTIONS requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('OPTIONS', endpoint_name)
def post(self, endpoint_name):
"""Decorates a view to use it for handling of POST requests.
        :param endpoint_name: name of endpoint.
"""
return self._register('POST', endpoint_name)
def put(self, endpoint_name):
"""Decorates a view to use it for handling of PUT requests.
:param endpoint_name: name of endpoint.
"""
return self._register('PUT', endpoint_name)
def patch(self, endpoint_name):
"""Decorates a view to use it for handling of PATCH requests.
:param endpoint_name: name of endpoint.
"""
return self._register('PATCH', endpoint_name)
def delete(self, endpoint_name):
"""Decorates a view to use it for handling of DELETE requests.
:param endpoint_name: name of endpoint.
"""
return self._register('DELETE', endpoint_name)
def before(self, target):
"""Decorates a function to call it before views.
:param target: (optional) name of endpoint. Without it the
hook will be added for all endpoints.
"""
if callable(target):
endpoint_name = None
else:
endpoint_name = target
def decorator(view):
self.add_pre_hook(endpoint_name, view)
return view
if endpoint_name is None:
return decorator(target)
return decorator
def add_pre_hook(self, endpoint_name, hook):
"""Adds a function to call it before endpoint's views.
:param endpoint_name: name of handler endpoint
        :param hook: function that should be called before endpoint's views
"""
self._pre_hooks[endpoint_name].append(hook)
def after(self, target):
"""Decorates a function to call it after views.
:param target: (optional) name of endpoint. Without it the
hook will be added for all endpoints.
"""
if callable(target):
endpoint_name = None
else:
endpoint_name = target
def decorator(view):
self.add_post_hook(endpoint_name, view)
return view
if endpoint_name is None:
return decorator(target)
return decorator
def add_post_hook(self, endpoint_name, hook):
"""Adds a function to call it after endpoint's views.
:param endpoint_name: name of handler endpoint
:param hook: function that should be called after endpoint's views
"""
self._post_hooks[endpoint_name].append(hook)
def decorate(self, endpoint_name, decorator):
"""Decorates an endpoint.
:param endpoint_name: an endpoint to decorate.
:param decorator: one decorator or iterable with decorators.
"""
endpoint = getattr(self, endpoint_name)
if isinstance(decorator, Iterable):
for dec in reversed(decorator):
endpoint = dec(endpoint)
else:
endpoint = decorator(endpoint)
setattr(self, endpoint_name, endpoint)
| mit | 9,030,394,841,770,216,000 | 29.650718 | 78 | 0.611146 | false |
Senseg/robotframework | src/robot/libraries/BuiltIn.py | 1 | 87619 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
from robot.output import LOGGER, Message
from robot.errors import DataError, ExecutionFailed, ExecutionFailures
from robot import utils
from robot.utils import asserts
from robot.variables import is_var, is_list_var
from robot.running import Keyword, RUN_KW_REGISTER
from robot.running.context import EXECUTION_CONTEXTS
from robot.common import UserErrorHandler
from robot.version import get_version
from robot.model import TagPatterns
if utils.is_jython:
from java.lang import String, Number
try:
bin # available since Python 2.6
except NameError:
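    # Fallback for older interpreters: build the binary digits by repeated
    # divmod by two, then join them in reverse order.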
def bin(integer):
if not isinstance(integer, (int, long)):
raise TypeError
if integer >= 0:
prefix = '0b'
else:
prefix = '-0b'
integer = abs(integer)
bins = []
while integer > 1:
integer, remainder = divmod(integer, 2)
bins.append(str(remainder))
bins.append(str(integer))
return prefix + ''.join(reversed(bins))
class _Converter:
def convert_to_integer(self, item, base=None):
"""Converts the given item to an integer number.
If the given item is a string, it is by default expected to be an
integer in base 10. Starting from Robot Framework 2.6 there are two
ways to convert from other bases:
1) Give base explicitly to the keyword as `base` argument.
2) Prefix the given string with the base so that `0b` means binary
(base 2), `0o` means octal (base 8), and `0x` means hex (base 16).
        The prefix is considered only when the `base` argument is not given,
        and it may itself be preceded by a plus or minus sign (e.g. `-0x100`).
The syntax is case-insensitive and possible spaces are ignored.
Examples:
| ${result} = | Convert To Integer | 100 | | # Result is 100 |
| ${result} = | Convert To Integer | FF AA | 16 | # Result is 65450 |
| ${result} = | Convert To Integer | 100 | 8 | # Result is 64 |
| ${result} = | Convert To Integer | -100 | 2 | # Result is -4 |
| ${result} = | Convert To Integer | 0b100 | | # Result is 4 |
| ${result} = | Convert To Integer | -0x100 | | # Result is -256 |
See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`
and `Convert To Hex`.
"""
self._log_types(item)
return self._convert_to_integer(item, base)
def _convert_to_integer(self, orig, base=None):
try:
item = self._handle_java_numbers(orig)
item, base = self._get_base(item, base)
if base:
return int(item, self._convert_to_integer(base))
return int(item)
except:
raise RuntimeError("'%s' cannot be converted to an integer: %s"
% (orig, utils.get_error_message()))
def _handle_java_numbers(self, item):
if not utils.is_jython:
return item
if isinstance(item, String):
return utils.unic(item)
if isinstance(item, Number):
return item.doubleValue()
return item
def _get_base(self, item, base):
if not isinstance(item, basestring):
return item, base
item = utils.normalize(item)
if item.startswith(('-', '+')):
sign = item[0]
item = item[1:]
else:
sign = ''
bases = {'0b': 2, '0o': 8, '0x': 16}
if base or not item.startswith(tuple(bases)):
return sign+item, base
return sign+item[2:], bases[item[:2]]
def convert_to_binary(self, item, base=None, prefix=None, length=None):
"""Converts the given item to a binary string.
The `item`, with an optional `base`, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a binary number (base 2) represented as a
string such as `'1011'`.
The returned value can contain an optional `prefix` and can be
required to be of minimum `length` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Binary | 10 | | | # Result is 1010 |
| ${result} = | Convert To Binary | F | base=16 | prefix=0b | # Result is 0b1111 |
| ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 |
This keyword was added in Robot Framework 2.6. See also
`Convert To Integer`, `Convert To Octal` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(bin, item, base, prefix, length)
def convert_to_octal(self, item, base=None, prefix=None, length=None):
"""Converts the given item to an octal string.
The `item`, with an optional `base`, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to an octal number (base 8) represented as a
string such as `'775'`.
The returned value can contain an optional `prefix` and can be
required to be of minimum `length` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Octal | 10 | | | # Result is 12 |
| ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017 |
| ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 |
This keyword was added in Robot Framework 2.6. See also
`Convert To Integer`, `Convert To Binary` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(oct, item, base, prefix, length)
def convert_to_hex(self, item, base=None, prefix=None, length=None,
lowercase=False):
"""Converts the given item to a hexadecimal string.
The `item`, with an optional `base`, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a hexadecimal number (base 16) represented as
a string such as `'FF0A'`.
The returned value can contain an optional `prefix` and can be
required to be of minimum `length` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
By default the value is returned as an upper case string, but
giving any non-empty value to the `lowercase` argument turns
the value (but not the prefix) to lower case.
Examples:
| ${result} = | Convert To Hex | 255 | | | # Result is FF |
| ${result} = | Convert To Hex | -10 | prefix=0x | length=2 | # Result is -0x0A |
| ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff |
This keyword was added in Robot Framework 2.6. See also
`Convert To Integer`, `Convert To Binary` and `Convert To Octal`.
"""
return self._convert_to_bin_oct_hex(hex, item, base, prefix, length,
lowercase)
def _convert_to_bin_oct_hex(self, method, item, base, prefix, length,
lowercase=False):
self._log_types(item)
ret = method(self._convert_to_integer(item, base)).upper()
prefix = prefix or ''
if ret[0] == '-':
prefix = '-' + prefix
ret = ret[1:]
if len(ret) > 1: # oct(0) -> '0' (i.e. has no prefix)
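            # In Python 2, bin() and hex() prefix their result with two
            # characters ('0b'/'0x') while oct() prefixes with a single '0'.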
prefix_length = {bin: 2, oct: 1, hex: 2}[method]
ret = ret[prefix_length:]
if length:
ret = ret.rjust(self._convert_to_integer(length), '0')
if lowercase:
ret = ret.lower()
return prefix + ret
def convert_to_number(self, item, precision=None):
"""Converts the given item to a floating point number.
If the optional `precision` is positive or zero, the returned number
is rounded to that number of decimal digits. Negative precision means
that the number is rounded to the closest multiple of 10 to the power
of the absolute precision. The support for precision was added in
Robot Framework 2.6.
Examples:
| ${result} = | Convert To Number | 42.512 | | # Result is 42.512 |
| ${result} = | Convert To Number | 42.512 | 1 | # Result is 42.5 |
| ${result} = | Convert To Number | 42.512 | 0 | # Result is 43.0 |
| ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0 |
Notice that machines generally cannot store floating point numbers
accurately. This may cause surprises with these numbers in general
and also when they are rounded. For more information see, for example,
this floating point arithmetic tutorial:
http://docs.python.org/tutorial/floatingpoint.html
If you need an integer number, use `Convert To Integer` instead.
"""
self._log_types(item)
return self._convert_to_number(item, precision)
def _convert_to_number(self, item, precision=None):
number = self._convert_to_number_without_precision(item)
if precision:
number = round(number, self._convert_to_integer(precision))
return number
def _convert_to_number_without_precision(self, item):
try:
if utils.is_jython:
item = self._handle_java_numbers(item)
return float(item)
except:
error = utils.get_error_message()
try:
return float(self._convert_to_integer(item))
except RuntimeError:
raise RuntimeError("'%s' cannot be converted to a floating "
"point number: %s" % (item, error))
def convert_to_string(self, item):
"""Converts the given item to a Unicode string.
Uses '__unicode__' or '__str__' method with Python objects and
'toString' with Java objects.
"""
self._log_types(item)
return self._convert_to_string(item)
def _convert_to_string(self, item):
return utils.unic(item)
def convert_to_boolean(self, item):
"""Converts the given item to Boolean true or false.
Handles strings 'True' and 'False' (case-insensitive) as expected,
otherwise returns item's truth value using Python's 'bool' method.
For more information about truth values, see
http://docs.python.org/lib/truth.html.
"""
self._log_types(item)
if isinstance(item, basestring):
if utils.eq(item, 'True'):
return True
if utils.eq(item, 'False'):
return False
return bool(item)
def create_list(self, *items):
"""Returns a list containing given items.
The returned list can be assigned both to ${scalar} and @{list}
        variables. The former can be used e.g. with Java keywords expecting
an array as an argument.
Examples:
| @{list} = | Create List | a | b | c |
| ${scalar} = | Create List | a | b | c |
| ${ints} = | Create List | ${1} | ${2} | ${3} |
"""
return list(items)
class _Verify:
def fail(self, msg=None):
"""Fails the test immediately with the given (optional) message.
See `Fatal Error` if you need to stop the whole test execution.
"""
raise AssertionError(msg) if msg else AssertionError()
def fatal_error(self, msg=None):
"""Stops the whole test execution.
The test or suite where this keyword is used fails with the provided
message, and subsequent tests fail with a canned message.
Possible teardowns will nevertheless be executed.
See `Fail` if you only want to stop one test case unconditionally.
"""
error = AssertionError(msg) if msg else AssertionError()
error.ROBOT_EXIT_ON_FAILURE = True
raise error
def exit_for_loop(self):
"""Immediately stops executing the enclosing for loop.
This keyword can be used directly in a for loop or in a keyword that
the for loop uses. In both cases the test execution continues after
the for loop. If executed outside of a for loop, the test fails.
Example:
| :FOR | ${var} | IN | @{SOME LIST} |
| | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop |
| | Do Something | ${var} |
New in Robot Framework 2.5.2.
"""
# Error message is shown only if there is no enclosing for loop
error = AssertionError('Exit for loop without enclosing for loop.')
error.ROBOT_EXIT_FOR_LOOP = True
raise error
def should_not_be_true(self, condition, msg=None):
"""Fails if the given condition is true.
See `Should Be True` for details about how `condition` is evaluated and
how `msg` can be used to override the default error message.
"""
if not msg:
msg = "'%s' should not be true" % condition
asserts.fail_if(self._is_true(condition), msg)
def should_be_true(self, condition, msg=None):
"""Fails if the given condition is not true.
If `condition` is a string (e.g. '${rc} < 10'), it is evaluated as a
Python expression using the built-in 'eval' function and the keyword
status is decided based on the result. If a non-string item is given,
the status is got directly from its truth value as explained at
http://docs.python.org/lib/truth.html.
The default error message ('<condition> should be true') is not very
informative, but it can be overridden with the `msg` argument.
Examples:
| Should Be True | ${rc} < 10 |
| Should Be True | '${status}' == 'PASS' | # Strings must be quoted |
| Should Be True | ${number} | # Passes if ${number} is not zero |
| Should Be True | ${list} | # Passes if ${list} is not empty |
"""
if not msg:
msg = "'%s' should be true" % condition
asserts.fail_unless(self._is_true(condition), msg)
def should_be_equal(self, first, second, msg=None, values=True):
"""Fails if the given objects are unequal.
- If `msg` is not given, the error message is 'first != second'.
- If `msg` is given and `values` is either Boolean False or the
string 'False' or 'No Values', the error message is simply `msg`.
- Otherwise the error message is '`msg`: `first` != `second`'.
"""
self._log_types(first, second)
self._should_be_equal(first, second, msg, values)
def _should_be_equal(self, first, second, msg, values):
asserts.fail_unless_equal(first, second, msg,
self._include_values(values))
def _log_types(self, *args):
msg = ["Argument types are:"] + [str(type(a)) for a in args]
self.log('\n'.join(msg))
def _include_values(self, values):
if isinstance(values, basestring):
return values.lower() not in ['no values', 'false']
return bool(values)
def should_not_be_equal(self, first, second, msg=None, values=True):
"""Fails if the given objects are equal.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
self._log_types(first, second)
self._should_not_be_equal(first, second, msg, values)
def _should_not_be_equal(self, first, second, msg, values):
asserts.fail_if_equal(first, second, msg, self._include_values(values))
def should_not_be_equal_as_integers(self, first, second, msg=None,
values=True, base=None):
"""Fails if objects are equal after converting them to integers.
See `Convert To Integer` for information how to convert integers from
other bases than 10 using `base` argument or `0b/0o/0x` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
See `Should Be Equal As Integers` for some usage examples.
"""
self._log_types(first, second)
self._should_not_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
def should_be_equal_as_integers(self, first, second, msg=None, values=True,
base=None):
"""Fails if objects are unequal after converting them to integers.
See `Convert To Integer` for information how to convert integers from
other bases than 10 using `base` argument or `0b/0o/0x` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
Examples:
| Should Be Equal As Integers | 42 | ${42} | Error message |
| Should Be Equal As Integers | ABCD | abcd | base=16 |
| Should Be Equal As Integers | 0b1011 | 11 |
"""
self._log_types(first, second)
self._should_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
def should_not_be_equal_as_numbers(self, first, second, msg=None,
values=True, precision=6):
"""Fails if objects are equal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given `precision`. The support for giving precision was added in
Robot Framework 2.6, in earlier versions it was hard-coded to 6.
See `Should Be Equal As Numbers` for examples on how to use
`precision` and why it does not always work as expected. See also
`Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
self._log_types(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_numbers(self, first, second, msg=None, values=True,
precision=6):
"""Fails if objects are unequal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given `precision`. The support for giving precision was added in
Robot Framework 2.6, in earlier versions it was hard-coded to 6.
Examples:
| Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 |
| Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes |
| Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes |
| Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes |
As discussed in the documentation of `Convert To Number`, machines
generally cannot store floating point numbers accurately. Because of
this limitation, comparing floats for equality is problematic and
a correct approach to use depends on the context. This keyword uses
a very naive approach of rounding the numbers before comparing them,
which is both prone to rounding errors and does not work very well if
numbers are really big or small. For more information about comparing
floats, and ideas on how to implement your own context specific
comparison algorithm, see this great article:
http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
See `Should Not Be Equal As Numbers` for a negative version of this
keyword and `Should Be Equal` for an explanation on how to override
the default error message with `msg` and `values`.
"""
self._log_types(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_be_equal(first, second, msg, values)
def should_not_be_equal_as_strings(self, first, second, msg=None, values=True):
"""Fails if objects are equal after converting them to strings.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
self._log_types(first, second)
first, second = [self._convert_to_string(i) for i in first, second]
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_strings(self, first, second, msg=None, values=True):
"""Fails if objects are unequal after converting them to strings.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
self._log_types(first, second)
first, second = [self._convert_to_string(i) for i in first, second]
self._should_be_equal(first, second, msg, values)
def should_not_start_with(self, str1, str2, msg=None, values=True):
"""Fails if the string `str1` starts with the string `str2`.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'starts with')
asserts.fail_if(str1.startswith(str2), msg)
def should_start_with(self, str1, str2, msg=None, values=True):
"""Fails if the string `str1` does not start with the string `str2`.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'does not start with')
asserts.fail_unless(str1.startswith(str2), msg)
def should_not_end_with(self, str1, str2, msg=None, values=True):
"""Fails if the string `str1` ends with the string `str2`.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'ends with')
asserts.fail_if(str1.endswith(str2), msg)
def should_end_with(self, str1, str2, msg=None, values=True):
"""Fails if the string `str1` does not end with the string `str2`.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'does not end with')
asserts.fail_unless(str1.endswith(str2), msg)
def should_not_contain(self, item1, item2, msg=None, values=True):
"""Fails if `item1` contains `item2` one or more times.
Works with strings, lists, and anything that supports Python's 'in'
keyword. See `Should Be Equal` for an explanation on how to override
the default error message with `msg` and `values`.
Examples:
| Should Not Contain | ${output} | FAILED |
| Should Not Contain | ${some_list} | value |
"""
msg = self._get_string_msg(item1, item2, msg, values, 'contains')
asserts.fail_if(item2 in item1, msg)
def should_contain(self, item1, item2, msg=None, values=True):
"""Fails if `item1` does not contain `item2` one or more times.
Works with strings, lists, and anything that supports Python's 'in'
keyword. See `Should Be Equal` for an explanation on how to override
the default error message with `msg` and `values`.
Examples:
| Should Contain | ${output} | PASS |
| Should Contain | ${some_list} | value |
"""
msg = self._get_string_msg(item1, item2, msg, values, 'does not contain')
asserts.fail_unless(item2 in item1, msg)
def should_contain_x_times(self, item1, item2, count, msg=None):
"""Fails if `item1` does not contain `item2` `count` times.
Works with strings, lists and all objects that `Get Count` works
with. The default error message can be overridden with `msg` and
the actual count is always logged.
Examples:
| Should Contain X Times | ${output} | hello | 2 |
| Should Contain X Times | ${some list} | value | 3 |
"""
if not msg:
msg = "'%s' does not contain '%s' %s times" \
% (utils.unic(item1), utils.unic(item2), count)
self.should_be_equal_as_integers(self.get_count(item1, item2),
count, msg, values=False)
def get_count(self, item1, item2):
"""Returns and logs how many times `item2` is found from `item1`.
This keyword works with Python strings and lists and all objects
that either have 'count' method or can be converted to Python lists.
Example:
| ${count} = | Get Count | ${some item} | interesting value |
| Should Be True | 5 < ${count} < 10 |
"""
if not hasattr(item1, 'count'):
try:
item1 = list(item1)
except:
raise RuntimeError("Converting '%s' to list failed: %s"
% (item1, utils.get_error_message()))
count = item1.count(item2)
self.log('Item found from the first item %d time%s'
% (count, utils.plural_or_not(count)))
return count
def should_not_match(self, string, pattern, msg=None, values=True):
"""Fails if the given `string` matches the given `pattern`.
        Pattern matching is similar to matching files in a shell, and it is
        always case-sensitive. In the pattern '*' matches anything and '?'
        matches any single character.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(string, pattern, msg, values, 'matches')
asserts.fail_if(self._matches(string, pattern), msg)
def should_match(self, string, pattern, msg=None, values=True):
"""Fails unless the given `string` matches the given `pattern`.
        Pattern matching is similar to matching files in a shell, and it is
        always case-sensitive. In the pattern, '*' matches anything and '?'
        matches any single character.
See `Should Be Equal` for an explanation on how to override the default
error message with `msg` and `values`.
"""
msg = self._get_string_msg(string, pattern, msg, values,
'does not match')
asserts.fail_unless(self._matches(string, pattern), msg)
def should_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if `string` does not match `pattern` as a regular expression.
Regular expression check is done using the Python 're' module, which
has a pattern syntax derived from Perl, and thus also very similar to
the one in Java. See the following documents for more details about
regular expressions in general and Python implementation in particular.
| http://docs.python.org/lib/module-re.html
| http://www.amk.ca/python/howto/regex/
Things to note about the regexp syntax in Robot Framework test data:
1) Backslash is an escape character in the test data, and possible
backslashes in the pattern must thus be escaped with another backslash
(e.g. '\\\\d\\\\w+').
2) Strings that may contain special characters, but should be handled
as literal strings, can be escaped with the `Regexp Escape` keyword.
3) The given pattern does not need to match the whole string. For
example, the pattern 'ello' matches the string 'Hello world!'. If
a full match is needed, the '^' and '$' characters can be used to
denote the beginning and end of the string, respectively. For example,
'^ello$' only matches the exact string 'ello'.
4) Possible flags altering how the expression is parsed (e.g.
re.IGNORECASE, re.MULTILINE) can be set by prefixing the pattern with
the '(?iLmsux)' group (e.g. '(?im)pattern'). The available flags are
'IGNORECASE': 'i', 'MULTILINE': 'm', 'DOTALL': 's', 'VERBOSE': 'x',
'UNICODE': 'u', and 'LOCALE': 'L'.
If this keyword passes, it returns the portion of the string that
matched the pattern. Additionally, the possible captured groups are
returned.
See the `Should Be Equal` keyword for an explanation on how to override
the default error message with the `msg` and `values` arguments.
Examples:
| Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers |
| Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more |
| ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ |
| ${match} | ${group1} | ${group2} = |
| ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) |
=>
| ${ret} = 'Foo: 42'
| ${match} = 'Bar: 43'
| ${group1} = 'Bar'
| ${group2} = '43'
"""
msg = self._get_string_msg(string, pattern, msg, values, 'does not match')
res = re.search(pattern, string)
asserts.fail_if_none(res, msg, False)
match = res.group(0)
groups = res.groups()
if groups:
return [match] + list(groups)
return match
def should_not_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if `string` matches `pattern` as a regular expression.
See `Should Match Regexp` for more information about arguments.
"""
msg = self._get_string_msg(string, pattern, msg, values, 'matches')
asserts.fail_unless_none(re.search(pattern, string), msg, False)
def get_length(self, item):
"""Returns and logs the length of the given item.
The item can be anything that has a length, for example, a string,
a list, or a mapping. The keyword first tries to get the length with
the Python function `len`, which calls the item's `__len__` method
internally. If that fails, the keyword tries to call the item's
possible `length` and `size` methods directly. The final attempt is
trying to get the value of the item's `length` attribute. If all
these attempts are unsuccessful, the keyword fails.
It is possible to use this keyword also with list variables (e.g.
`@{LIST}`), but you need to use them as scalars (e.g. `${LIST}`).
"""
length = self._get_length(item)
self.log('Length is %d' % length)
return length
def _get_length(self, item):
try: return len(item)
except utils.RERAISED_EXCEPTIONS: raise
except:
try: return item.length()
except utils.RERAISED_EXCEPTIONS: raise
except:
try: return item.size()
except utils.RERAISED_EXCEPTIONS: raise
except:
try: return item.length
except utils.RERAISED_EXCEPTIONS: raise
except:
raise RuntimeError("Could not get length of '%s'" % item)
def length_should_be(self, item, length, msg=None):
"""Verifies that the length of the given item is correct.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the `msg` argument.
"""
length = self._convert_to_integer(length)
if self.get_length(item) != length:
if not msg:
msg = "Length of '%s' should be %d but it is %d" \
% (item, length, self.get_length(item))
raise AssertionError(msg)
def should_be_empty(self, item, msg=None):
"""Verifies that the given item is empty.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the `msg` argument.
"""
if self.get_length(item) > 0:
raise AssertionError(msg or "'%s' should be empty" % item)
def should_not_be_empty(self, item, msg=None):
"""Verifies that the given item is not empty.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the `msg` argument.
"""
if self.get_length(item) == 0:
raise AssertionError(msg or "'%s' should not be empty" % item)
def _get_string_msg(self, str1, str2, msg, values, delim):
default = "'%s' %s '%s'" % (utils.unic(str1), delim, utils.unic(str2))
if not msg:
msg = default
elif values is True:
msg = '%s: %s' % (msg, default)
return msg
class _Variables:
def get_variables(self):
"""Returns a dictionary containing all variables in the current scope."""
return self._variables
def get_variable_value(self, name, default=None):
"""Returns variable value or `default` if the variable does not exist.
The name of the variable can be given either as a normal variable name
(e.g. `${NAME}`) or in escaped format (e.g. `\\${NAME}`). Notice that
the former has some limitations explained in `Set Suite Variable`.
Examples:
| ${x} = | Get Variable Value | ${a} | default |
| ${y} = | Get Variable Value | ${a} | ${b} |
| ${z} = | Get Variable Value | ${z} | |
=>
| ${x} gets value of ${a} if ${a} exists and string "default" otherwise
| ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise
| ${z} is set to Python `None` if it does not exist previously
This keyword was added in Robot Framework 2.6. See `Set Variable If`
for another keyword to set variables dynamically.
"""
name = self._get_var_name(name)
variables = self.get_variables()
try:
return variables[name]
except DataError:
return variables.replace_scalar(default)
def log_variables(self, level='INFO'):
"""Logs all variables in the current scope with given log level."""
variables = self.get_variables()
for name in sorted(variables.keys(), key=lambda s: s.lower()):
msg = utils.format_assign_message(name, variables[name],
cut_long=False)
self.log(msg, level)
def variable_should_exist(self, name, msg=None):
"""Fails unless the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. `${NAME}`) or in escaped format (e.g. `\\${NAME}`). Notice that
the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the `msg` argument.
See also `Variable Should Not Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
variables = self.get_variables()
msg = variables.replace_string(msg) if msg \
else "Variable %s does not exist" % name
asserts.fail_unless(variables.has_key(name), msg)
def variable_should_not_exist(self, name, msg=None):
"""Fails if the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. `${NAME}`) or in escaped format (e.g. `\\${NAME}`). Notice that
the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the `msg` argument.
See also `Variable Should Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
variables = self.get_variables()
msg = variables.replace_string(msg) if msg \
else "Variable %s exists" % name
asserts.fail_if(variables.has_key(name), msg)
def replace_variables(self, text):
"""Replaces variables in the given text with their current values.
If the text contains undefined variables, this keyword fails.
If the given `text` contains only a single variable, its value is
returned as-is and it can be any object. Otherwise this keyword
always returns a string.
Example:
The file 'template.txt' contains 'Hello ${NAME}!' and variable
'${NAME}' has the value 'Robot'.
| ${template} = | Get File | ${CURDIR}/template.txt |
| ${message} = | Replace Variables | ${template} |
| Should Be Equal | ${message} | Hello Robot! |
"""
return self.get_variables().replace_scalar(text)
def set_variable(self, *values):
"""Returns the given values which can then be assigned to a variables.
This keyword is mainly used for setting scalar variables.
Additionally it can be used for converting a scalar variable
containing a list to a list variable or to multiple scalar variables.
It is recommended to use `Create List` when creating new lists.
Examples:
| ${hi} = | Set Variable | Hello, world! |
| ${hi2} = | Set Variable | I said: ${hi} |
| ${var1} | ${var2} = | Set Variable | Hello | world |
| @{list} = | Set Variable | ${list with some items} |
| ${item1} | ${item2} = | Set Variable | ${list with 2 items} |
Variables created with this keyword are available only in the
scope where they are created. See `Set Global Variable`,
`Set Test Variable` and `Set Suite Variable` for information on how to
set variables so that they are available also in a larger scope.
"""
if len(values) == 0:
return ''
elif len(values) == 1:
return values[0]
else:
return list(values)
def set_test_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current test.
Variables set with this keyword are available everywhere within the
scope of the currently executed test case. For example, if you set a
variable in a user keyword, it is available both in the test case level
and also in all other user keywords used in the current test. Other
test cases will not see variables set with this keyword.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self.get_variables().set_test(name, value)
self._log_set_variable(name, value)
def set_suite_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current suite.
Variables set with this keyword are available everywhere within the
scope of the currently executed test suite. Setting variables with this
keyword thus has the same effect as creating them using the Variable
table in the test data file or importing them from variable files.
Other test suites, including possible child test suites, will not see
variables set with this keyword.
The name of the variable can be given either as a normal variable name
(e.g. `${NAME}`) or in escaped format as `\\${NAME}` or `$NAME`.
If a variable already exists within the new scope, its value will be
overwritten. Otherwise a new variable is created. If a variable already
exists within the current scope, the value can be left empty and the
variable within the new scope gets the value within the current scope.
Examples:
| Set Suite Variable | ${GREET} | Hello, world! |
| ${ID} = | Get ID |
| Set Suite Variable | ${ID} |
        *NOTE:* If the variable has a value which is itself a variable (escaped
or not), you must always use the escaped format to reset the variable:
Example:
| ${NAME} = | Set Variable | \${var} |
| Set Suite Variable | ${NAME} | value | # Sets variable ${var} |
| Set Suite Variable | \${NAME} | value | # Sets variable ${NAME} |
This limitation applies also to `Set Test/Suite/Global Variable`,
`Variable Should (Not) Exist`, and `Get Variable Value` keywords.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self.get_variables().set_suite(name, value)
self._log_set_variable(name, value)
def set_global_variable(self, name, *values):
"""Makes a variable available globally in all tests and suites.
Variables set with this keyword are globally available in all test
cases and suites executed after setting them. Setting variables with
this keyword thus has the same effect as creating from the command line
using the options '--variable' or '--variablefile'. Because this
keyword can change variables everywhere, it should be used with care.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self.get_variables().set_global(name, value)
self._log_set_variable(name, value)
# Helpers
def _get_var_name(self, orig):
name = self._resolve_possible_variable(orig)
try:
return self._unescape_variable_if_needed(name)
except ValueError:
raise RuntimeError("Invalid variable syntax '%s'" % orig)
def _resolve_possible_variable(self, name):
try:
resolved = self.get_variables()[name]
return self._unescape_variable_if_needed(resolved)
except (KeyError, ValueError, DataError):
return name
def _unescape_variable_if_needed(self, name):
if not (isinstance(name, basestring) and len(name) > 1):
raise ValueError
if name.startswith('\\'):
name = name[1:]
elif name[0] in ['$','@'] and name[1] != '{':
name = '%s{%s}' % (name[0], name[1:])
if is_var(name):
return name
# Support for possible internal variables (issue 397)
name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1]))
if is_var(name):
return name
raise ValueError
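    # Illustrative sketch (not part of the original library): the helper above
    # normalizes escaped variable names, roughly mapping
    #
    #     '\${NAME}'  -> '${NAME}'
    #     '$NAME'     -> '${NAME}'
    #     '@NAMES'    -> '@{NAMES}'
    #     'plain'     -> ValueError (not a valid variable name)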
def _get_var_value(self, name, values):
variables = self.get_variables()
if not values:
return variables[name]
values = variables.replace_list(values)
if len(values) == 1 and name[0] == '$':
return values[0]
return list(values)
def _log_set_variable(self, name, value):
self.log(utils.format_assign_message(name, value))
class _RunKeyword:
# If you use any of these run keyword variants from another library, you
# should register those keywords with 'register_run_keyword' method. See
# the documentation of that method at the end of this file. There are also
# other run keyword variant keywords in BuiltIn which can also be seen
# at the end of this file.
def run_keyword(self, name, *args):
"""Executes the given keyword with the given arguments.
Because the name of the keyword to execute is given as an argument, it
can be a variable and thus set dynamically, e.g. from a return value of
another keyword or from the command line.
"""
if not isinstance(name, basestring):
raise RuntimeError('Keyword name must be a string.')
kw = Keyword(name, list(args))
return kw.run(self._execution_context)
def run_keywords(self, *names):
"""Executes all the given keywords in a sequence without arguments.
This keyword is mainly useful in setups and teardowns when they need to
take care of multiple actions and creating a new higher level user
keyword is overkill. User keywords must nevertheless be used if the
executed keywords need to take arguments.
Example:
| *Setting* | *Value* | *Value* | *Value* |
| Suite Setup | Run Keywords | Initialize database | Start servers |
"""
errors = []
for kw in self.get_variables().replace_list(names):
try:
self.run_keyword(kw)
except ExecutionFailed, err:
errors.extend(err.get_errors())
if not err.can_continue(self._execution_context.teardown):
break
if errors:
raise ExecutionFailures(errors)
def run_keyword_if(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if `condition` is true.
The given `condition` is evaluated similarly as with `Should Be
True` keyword, and `name` and `*args` have same semantics as with
`Run Keyword`.
Example, a simple if/else construct:
| ${status} | ${value} = | Run Keyword And Ignore Error | My Keyword |
| Run Keyword If | '${status}' == 'PASS' | Some Action |
| Run Keyword Unless | '${status}' == 'PASS' | Another Action |
In this example, only either 'Some Action' or 'Another Action' is
executed, based on the status of 'My Keyword'.
"""
if self._is_true(condition):
return self.run_keyword(name, *args)
def run_keyword_unless(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if `condition` is false.
See `Run Keyword If` for more information and an example.
"""
if not self._is_true(condition):
return self.run_keyword(name, *args)
def run_keyword_and_ignore_error(self, name, *args):
"""Runs the given keyword with the given arguments and ignores possible error.
        This keyword returns two values: the first is either 'PASS' or
        'FAIL', depending on the status of the executed keyword. The second
value is either the return value of the keyword or the received error
message.
The keyword name and arguments work as in `Run Keyword`. See
`Run Keyword If` for a usage example.
Starting from Robot Framework 2.5 errors caused by invalid syntax,
timeouts, or fatal exceptions are not caught by this keyword.
"""
try:
return 'PASS', self.run_keyword(name, *args)
except ExecutionFailed, err:
if err.dont_cont:
raise
return 'FAIL', unicode(err)
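    # Illustrative sketch (an assumption, not part of the original library):
    # the same PASS/FAIL wrapping can be reused from a custom library, e.g.:
    #
    #     from robot.libraries.BuiltIn import BuiltIn
    #
    #     def run_and_report(name, *args):
    #         try:
    #             return 'PASS', BuiltIn().run_keyword(name, *args)
    #         except Exception, err:
    #             return 'FAIL', unicode(err)
    #
    # Unlike the keyword above, this simplified version also swallows fatal
    # errors; the real implementation re-raises them (err.dont_cont).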
def run_keyword_and_continue_on_failure(self, name, *args):
"""Runs the keyword and continues execution even if a failure occurs.
The keyword name and arguments work as with `Run Keyword`.
Example:
| Run Keyword And Continue On Failure | Fail | This is a stupid example |
| Log | This keyword is executed |
This keyword was added in Robot Framework 2.5. The execution is not
continued if the failure is caused by invalid syntax, timeout, or
fatal exception.
"""
try:
return self.run_keyword(name, *args)
except ExecutionFailed, err:
if not err.dont_cont:
err.cont = True
raise err
def run_keyword_and_expect_error(self, expected_error, name, *args):
"""Runs the keyword and checks that the expected error occurred.
The expected error must be given in the same format as in
Robot Framework reports. It can be a pattern containing
        characters '?', which matches any single character, and
        '*', which matches any number of any characters. `name` and
`*args` have same semantics as with `Run Keyword`.
If the expected error occurs, the error message is returned and it can
be further processed/tested, if needed. If there is no error, or the
error does not match the expected error, this keyword fails.
Examples:
| Run Keyword And Expect Error | My error | Some Keyword | arg1 | arg2 |
| ${msg} = | Run Keyword And Expect Error | * | My KW |
| Should Start With | ${msg} | Once upon a time in |
Starting from Robot Framework 2.5 errors caused by invalid syntax,
timeouts, or fatal exceptions are not caught by this keyword.
"""
try:
self.run_keyword(name, *args)
except ExecutionFailed, err:
if err.dont_cont:
raise
else:
raise AssertionError("Expected error '%s' did not occur"
% expected_error)
if not self._matches(unicode(err), expected_error):
raise AssertionError("Expected error '%s' but got '%s'"
% (expected_error, err))
return unicode(err)
def repeat_keyword(self, times, name, *args):
"""Executes the specified keyword multiple times.
`name` and `args` define the keyword that is executed
        similarly as with `Run Keyword`, and `times` specifies how many times
        the keyword should be executed. `times` can be given as an
integer or as a string that can be converted to an integer. It
can also have postfix 'times' or 'x' (case and space
insensitive) to make the expression easier to read.
If `times` is zero or negative, the keyword is not executed at
all. This keyword fails immediately if any of the execution
rounds fails.
Examples:
| Repeat Keyword | 5 times | Goto Previous Page |
| Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 |
"""
times = utils.normalize(str(times))
if times.endswith('times'):
times = times[:-5]
elif times.endswith('x'):
times = times[:-1]
times = self._convert_to_integer(times)
if times <= 0:
self.log("Keyword '%s' repeated zero times" % name)
for i in xrange(times):
self.log("Repeating keyword, round %d/%d" % (i+1, times))
self.run_keyword(name, *args)
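    # Illustrative sketch (not part of the original library): the 'times'
    # normalization above accepts plain integers plus 'times'/'x' suffixes:
    #
    #     def parse_times(value):
    #         value = ''.join(str(value).lower().split())  # ~ utils.normalize
    #         for suffix in ('times', 'x'):
    #             if value.endswith(suffix):
    #                 value = value[:-len(suffix)]
    #                 break
    #         return int(value)
    #
    #     parse_times('5 Times')  # -> 5
    #     parse_times('3x')       # -> 3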
def wait_until_keyword_succeeds(self, timeout, retry_interval, name, *args):
"""Waits until the specified keyword succeeds or the given timeout expires.
`name` and `args` define the keyword that is executed
similarly as with `Run Keyword`. If the specified keyword does
not succeed within `timeout`, this keyword fails.
`retry_interval` is the time to wait before trying to run the
keyword again after the previous run has failed.
Both `timeout` and `retry_interval` must be given in Robot Framework's
time format (e.g. '1 minute', '2 min 3 s', '4.5').
Errors caused by invalid syntax, test or keyword timeouts, or fatal
exceptions are not caught by this keyword.
Example:
| Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | arg1 | arg2 |
Running the same keyword multiple times inside this keyword can create
lots of output and considerably increase the size of the generated
output files. Starting from Robot Framework 2.7, it is possible to
remove unnecessary keywords from the outputs using
`--RemoveKeywords WUKS` command line option.
"""
timeout = utils.timestr_to_secs(timeout)
retry_interval = utils.timestr_to_secs(retry_interval)
maxtime = time.time() + timeout
error = None
while not error:
try:
return self.run_keyword(name, *args)
except ExecutionFailed, err:
if err.dont_cont:
raise
if time.time() > maxtime:
error = unicode(err)
else:
time.sleep(retry_interval)
raise AssertionError("Timeout %s exceeded. The last error was: %s"
% (utils.secs_to_timestr(timeout), error))
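    # Illustrative sketch (an assumption): the loop above is the classic
    # retry-until-deadline pattern; standalone it would look like:
    #
    #     import time
    #     def retry_until(func, timeout, interval):
    #         deadline = time.time() + timeout
    #         while True:
    #             try:
    #                 return func()
    #             except Exception, err:
    #                 if time.time() > deadline:
    #                     raise AssertionError('Timeout exceeded: %s' % err)
    #                 time.sleep(interval)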
def set_variable_if(self, condition, *values):
"""Sets variable based on the given condition.
The basic usage is giving a condition and two values. The
given condition is first evaluated the same way as with the
`Should Be True` keyword. If the condition is true, then the
first value is returned, and otherwise the second value is
returned. The second value can also be omitted, in which case
it has a default value None. This usage is illustrated in the
examples below, where ${rc} is assumed to be zero.
| ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero |
| ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 |
| ${var3} = | Set Variable If | ${rc} > 0 | whatever | |
=>
| ${var1} = 'zero'
| ${var2} = 'value2'
| ${var3} = None
It is also possible to have 'Else If' support by replacing the
second value with another condition, and having two new values
after it. If the first condition is not true, the second is
evaluated and one of the values after it is returned based on
its truth value. This can be continued by adding more
conditions without a limit.
| ${var} = | Set Variable If | ${rc} == 0 | zero |
        | ... | ${rc} > 0 | greater than zero | less than zero |
| |
| ${var} = | Set Variable If |
| ... | ${rc} == 0 | zero |
| ... | ${rc} == 1 | one |
| ... | ${rc} == 2 | two |
| ... | ${rc} > 2 | greater than two |
| ... | ${rc} < 0 | less than zero |
Use `Get Variable Value` if you need to set variables
        dynamically based on whether a variable exists or not.
"""
values = self._verify_values_for_set_variable_if(list(values))
if self._is_true(condition):
return self._variables.replace_scalar(values[0])
values = self._verify_values_for_set_variable_if(values[1:], True)
if len(values) == 1:
return self._variables.replace_scalar(values[0])
return self.run_keyword('BuiltIn.Set Variable If', *values[0:])
def _verify_values_for_set_variable_if(self, values, default=False):
if not values:
if default:
return [None]
raise RuntimeError('At least one value is required')
if is_list_var(values[0]):
values[:1] = [utils.escape(item) for item in
self._variables[values[0]]]
return self._verify_values_for_set_variable_if(values)
return values
def run_keyword_if_test_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test failed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
test = self._get_test_in_teardown('Run Keyword If Test Failed')
if not test.passed:
return self.run_keyword(name, *args)
def run_keyword_if_test_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test passed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
test = self._get_test_in_teardown('Run Keyword If Test Passed')
if test.passed:
return self.run_keyword(name, *args)
def run_keyword_if_timeout_occurred(self, name, *args):
"""Runs the given keyword if either a test or a keyword timeout has occurred.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
Available in Robot Framework 2.5 and newer.
"""
test = self._get_test_in_teardown('Run Keyword If Timeout Occurred')
if test.timeout.any_timeout_occurred():
return self.run_keyword(name, *args)
def _get_test_in_teardown(self, kwname):
test = self._namespace.test
if test and test.status != 'RUNNING':
return test
raise RuntimeError("Keyword '%s' can only be used in test teardown"
% kwname)
def run_keyword_if_all_critical_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all critical tests passed.
This keyword can only be used in suite teardown. Trying to use it in
any other place will result in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'All Critical Tests Passed')
if suite.critical_stats.failed == 0:
return self.run_keyword(name, *args)
def run_keyword_if_any_critical_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if any critical tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'Any Critical Tests Failed')
if suite.critical_stats.failed > 0:
return self.run_keyword(name, *args)
def run_keyword_if_all_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all tests passed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed')
if suite.all_stats.failed == 0:
return self.run_keyword(name, *args)
def run_keyword_if_any_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if one or more tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed')
if suite.all_stats.failed > 0:
return self.run_keyword(name, *args)
def _get_suite_in_teardown(self, kwname):
if self._namespace.suite.status == 'RUNNING':
raise RuntimeError("Keyword '%s' can only be used in suite teardown"
% kwname)
return self._namespace.suite
class _Misc:
def no_operation(self):
"""Does absolutely nothing."""
def sleep(self, time_, reason=None):
"""Pauses the test executed for the given time.
`time` may be either a number or a time string. Time strings are in
        a format such as '1 day 2 hours 3 minutes 4 seconds 5 milliseconds' or
'1d 2h 3m 4s 5ms', and they are fully explained in an appendix of Robot
Framework User Guide. Optional `reason` can be used to explain why
sleeping is necessary. Both the time slept and the reason are logged.
Examples:
| Sleep | 42 |
| Sleep | 1.5 |
| Sleep | 2 minutes 10 seconds |
| Sleep | 10s | Wait for a reply |
"""
seconds = utils.timestr_to_secs(time_)
# Python hangs with negative values
if seconds < 0:
seconds = 0
self._sleep_in_parts(seconds)
self.log('Slept %s' % utils.secs_to_timestr(seconds))
if reason:
self.log(reason)
def _sleep_in_parts(self, seconds):
# time.sleep can't be stopped in windows
# to ensure that we can signal stop (with timeout)
# split sleeping to small pieces
endtime = time.time() + float(seconds)
while True:
remaining = endtime - time.time()
if remaining <= 0:
break
time.sleep(min(remaining, 0.5))
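    # Illustrative sketch (not original code): the same interruptible-sleep
    # pattern as a standalone function:
    #
    #     import time
    #     def sleep_in_parts(seconds, step=0.5):
    #         endtime = time.time() + float(seconds)
    #         while True:
    #             remaining = endtime - time.time()
    #             if remaining <= 0:
    #                 break
    #             time.sleep(min(remaining, step))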
def catenate(self, *items):
"""Catenates the given items together and returns the resulted string.
By default, items are catenated with spaces, but if the first item
        starts with the string 'SEPARATOR=<sep>', the separator '<sep>' is used.
Items are converted into strings when necessary.
Examples:
| ${str1} = | Catenate | Hello | world | |
| ${str2} = | Catenate | SEPARATOR=--- | Hello | world |
| ${str3} = | Catenate | SEPARATOR= | Hello | world |
=>
| ${str1} = 'Hello world'
| ${str2} = 'Hello---world'
| ${str3} = 'Helloworld'
"""
if not items:
return ''
items = [utils.unic(item) for item in items]
if items[0].startswith('SEPARATOR='):
sep = items[0][len('SEPARATOR='):]
items = items[1:]
else:
sep = ' '
return sep.join(items)
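    # Illustrative sketch (not original code): the optional separator parsing
    # above in isolation:
    #
    #     def catenate(*items):
    #         items = [unicode(i) for i in items]
    #         sep = ' '
    #         if items and items[0].startswith('SEPARATOR='):
    #             sep, items = items[0][len('SEPARATOR='):], items[1:]
    #         return sep.join(items)
    #
    #     catenate('SEPARATOR=---', 'Hello', 'world')  # -> u'Hello---world'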
def log(self, message, level="INFO"):
"""Logs the given message with the given level.
Valid levels are TRACE, DEBUG, INFO (default), HTML and WARN.
The HTML level is special because it allows writing messages
without HTML code in them being escaped. For example, logging
a message '<img src="image.png">' using the HTML level creates
an image, but with other levels the message would be that exact
string. Notice that invalid HTML can easily corrupt the whole
log file so this feature should be used with care. The
actual log level used for HTML messages is INFO.
Messages logged with the WARN level will be visible also in
the console and in the Test Execution Errors section in the
log file.
"""
LOGGER.log_message(Message(message, level))
def log_many(self, *messages):
"""Logs the given messages as separate entries with the INFO level."""
for msg in messages:
self.log(msg)
def comment(self, *messages):
"""Displays the given messages in the log file as keyword arguments.
This keyword does nothing with the arguments it receives, but as they
are visible in the log, this keyword can be used to display simple
messages. Given arguments are ignored so thoroughly that they can even
        contain non-existing variables. If you are interested in variable
        values, you can use the `Log` or `Log Many` keywords.
"""
pass
def set_log_level(self, level):
"""Sets the log threshold to the specified level and returns the old level.
        Messages below the level will not be logged. The default logging level is
INFO, but it can be overridden with the command line option
'--loglevel'.
The available levels: TRACE, DEBUG, INFO (default), WARN and NONE (no
logging).
"""
try:
old = self._execution_context.output.set_log_level(level)
except DataError, err:
raise RuntimeError(unicode(err))
self.log('Log level changed from %s to %s' % (old, level.upper()))
return old
def import_library(self, name, *args):
"""Imports a library with the given name and optional arguments.
This functionality allows dynamic importing of libraries while tests
are running. That may be necessary, if the library itself is dynamic
and not yet available when test data is processed. In a normal case,
libraries should be imported using the Library setting in the Setting
table.
This keyword supports importing libraries both using library
        names and physical paths. When paths are used, they must be
given in absolute format. Forward slashes can be used as path
separators in all operating systems. It is possible to use
arguments as well as to give a custom name with 'WITH NAME'
syntax. For more information about importing libraries, see
Robot Framework User Guide.
Examples:
| Import Library | MyLibrary |
| Import Library | ${CURDIR}/Library.py | some | args |
| Import Library | ${CURDIR}/../libs/Lib.java | arg | WITH NAME | JavaLib |
"""
try:
self._namespace.import_library(name.replace('/', os.sep), list(args))
except DataError, err:
raise RuntimeError(unicode(err))
def import_variables(self, path, *args):
"""Imports a variable file with the given path and optional arguments.
Variables imported with this keyword are set into the test suite scope
similarly when importing them in the Setting table using the Variables
setting. These variables override possible existing variables with
the same names and this functionality can thus be used to import new
variables, e.g. for each test in a test suite.
        The given path must be absolute. Forward slashes can be used as the
        path separator regardless of the operating system.
Examples:
| Import Variables | ${CURDIR}/variables.py | | |
| Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 |
New in Robot Framework 2.5.4.
"""
try:
self._namespace.import_variables(path.replace('/', os.sep),
list(args), overwrite=True)
except DataError, err:
raise RuntimeError(unicode(err))
def import_resource(self, path):
"""Imports a resource file with the given path.
Resources imported with this keyword are set into the test suite scope
similarly when importing them in the Setting table using the Resource
setting.
        The given path must be absolute. Forward slashes can be used as the
        path separator regardless of the operating system.
Examples:
| Import Resource | ${CURDIR}/resource.txt |
| Import Resource | ${CURDIR}/../resources/resource.html |
"""
try:
self._namespace.import_resource(path.replace('/', os.sep))
except DataError, err:
raise RuntimeError(unicode(err))
def set_library_search_order(self, *libraries):
"""Sets the resolution order to use when a name matches multiple keywords.
The library search order is used to resolve conflicts when a keyword
name in the test data matches multiple keywords. The first library
(or resource, see below) containing the keyword is selected and that
        keyword implementation used. If the keyword is not found in any library
        (or resource), test execution fails the same way as when the search
order is not set.
When this keyword is used, there is no need to use the long
`LibraryName.Keyword Name` notation. For example, instead of
having
| MyLibrary.Keyword | arg |
| MyLibrary.Another Keyword |
| MyLibrary.Keyword | xxx |
you can have
| Set Library Search Order | MyLibrary |
| Keyword | arg |
| Another Keyword |
| Keyword | xxx |
Starting from Robot Framework 2.6.2 this keyword can be used also to
set the order of keywords in different resource files. In this case
resource names must be given without paths or extensions like:
| Set Library Search Order | resource | another_resource |
*NOTE:*
        - The search order is valid only in the suite where this keyword is used.
- Keywords in resources always have higher priority than
keywords in libraries regardless the search order.
- The old order is returned and can be used to reset the search order later.
- Starting from RF 2.6.2, library and resource names in the search order
are both case and space insensitive.
"""
old_order = self._namespace.library_search_order
self._namespace.library_search_order = libraries
return old_order
def keyword_should_exist(self, name, msg=None):
"""Fails unless the given keyword exists in the current scope.
Fails also if there are more than one keywords with the same name.
Works both with the short name (e.g. `Log`) and the full name
(e.g. `BuiltIn.Log`).
The default error message can be overridden with the `msg` argument.
New in Robot Framework 2.6. See also `Variable Should Exist`.
"""
try:
handler = self._namespace._get_handler(name)
if not handler:
raise DataError("No keyword with name '%s' found." % name)
if isinstance(handler, UserErrorHandler):
handler.run()
except DataError, err:
raise AssertionError(msg or unicode(err))
def get_time(self, format='timestamp', time_='NOW'):
"""Returns the given time in the requested format.
How time is returned is determined based on the given `format` string
as follows. Note that all checks are case-insensitive.
1) If `format` contains the word 'epoch', the time is returned
in seconds after the UNIX epoch (Jan 1, 1970 0:00:00).
The return value is always an integer.
2) If `format` contains any of the words 'year', 'month',
'day', 'hour', 'min', or 'sec', only the selected parts are
returned. The order of the returned parts is always the one
in the previous sentence and the order of words in `format`
is not significant. The parts are returned as zero-padded
strings (e.g. May -> '05').
3) Otherwise (and by default) the time is returned as a
timestamp string in the format '2006-02-24 15:08:31'.
By default this keyword returns the current time, but that can be
altered using `time` argument as explained below.
1) If `time` is a floating point number, it is interpreted as
seconds since the epoch. This documentation is written about
1177654467 seconds after the epoch.
2) If `time` is a valid timestamp, that time will be used. Valid
timestamp formats are 'YYYY-MM-DD hh:mm:ss' and 'YYYYMMDD hhmmss'.
3) If `time` is equal to 'NOW' (case-insensitive), the
current time is used.
4) If `time` is in the format 'NOW - 1 day' or 'NOW + 1 hour
30 min', the current time plus/minus the time specified
with the time string is used. The time string format is
described in an appendix of Robot Framework User Guide.
Examples (expecting the current time is 2006-03-29 15:06:21):
| ${time} = | Get Time | | | |
| ${secs} = | Get Time | epoch | | |
| ${year} = | Get Time | return year | | |
| ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day |
| @{time} = | Get Time | year month day hour min sec | | |
| ${y} | ${s} = | Get Time | seconds and year | |
=>
| ${time} = '2006-03-29 15:06:21'
| ${secs} = 1143637581
| ${year} = '2006'
| ${yyyy} = '2006', ${mm} = '03', ${dd} = '29'
| @{time} = ['2006', '03', '29', '15', '06', '21']
| ${y} = '2006'
| ${s} = '21'
| ${time} = | Get Time | | 1177654467 |
| ${secs} = | Get Time | sec | 2007-04-27 09:14:27 |
| ${year} = | Get Time | year | NOW | # The time of execution |
| ${day} = | Get Time | day | NOW - 1d | # 1 day subtraced from NOW |
| @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to NOW |
=>
| ${time} = '2007-04-27 09:14:27'
| ${secs} = 27
| ${year} = '2006'
| ${day} = '28'
| @{time} = ['16', '08', '24']
"""
return utils.get_time(format, utils.parse_time(time_))
def evaluate(self, expression, modules=None):
"""Evaluates the given expression in Python and returns the results.
`modules` argument can be used to specify a comma separated
list of Python modules to be imported and added to the
namespace of the evaluated `expression`.
Examples (expecting ${result} is 3.14):
| ${status} = | Evaluate | 0 < ${result} < 10 |
| ${down} = | Evaluate | int(${result}) |
| ${up} = | Evaluate | math.ceil(${result}) | math |
| ${random} = | Evaluate | random.randint(0, sys.maxint) | random,sys |
=>
| ${status} = True
| ${down} = 3
| ${up} = 4.0
| ${random} = <random integer>
Notice that instead of creating complicated expressions, it is
recommended to move the logic into a test library.
"""
modules = modules.replace(' ','').split(',') if modules else []
namespace = dict((m, __import__(m)) for m in modules if m != '')
try:
return eval(expression, namespace)
except:
raise RuntimeError("Evaluating expression '%s' failed: %s"
% (expression, utils.get_error_message()))
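    # Illustrative sketch (not original code): evaluating an expression with
    # dynamically imported modules in its namespace, as done above:
    #
    #     def evaluate(expression, modules=''):
    #         names = [m for m in modules.replace(' ', '').split(',') if m]
    #         namespace = dict((m, __import__(m)) for m in names)
    #         return eval(expression, namespace)
    #
    #     evaluate('math.ceil(3.14)', 'math')  # -> 4.0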
def call_method(self, object, method_name, *args):
"""Calls the named method of the given object with the provided arguments.
The possible return value from the method is returned and can be
        assigned to a variable. This keyword fails if the object does not have
        a method with the given name or if executing the method raises an
exception.
Examples:
| Call Method | ${hashtable} | put | myname | myvalue |
| ${isempty} = | Call Method | ${hashtable} | isEmpty | |
| Should Not Be True | ${isempty} | | | |
| ${value} = | Call Method | ${hashtable} | get | myname |
| Should Be Equal | ${value} | myvalue | | |
"""
try:
method = getattr(object, method_name)
except AttributeError:
raise RuntimeError("Object '%s' does not have a method '%s'"
% (object, method_name))
return method(*args)
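    # Illustrative sketch: the getattr-based dynamic dispatch used above:
    #
    #     method = getattr('abc', 'upper', None)
    #     if method is None:
    #         raise RuntimeError("Object has no method 'upper'")
    #     method()  # -> 'ABC'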
def regexp_escape(self, *patterns):
"""Returns each argument string escaped for use as a regular expression.
This keyword can be used to escape strings to be used with
`Should Match Regexp` and `Should Not Match Regexp` keywords.
Escaping is done with Python's re.escape() function.
Examples:
| ${escaped} = | Regexp Escape | ${original} |
| @{strings} = | Regexp Escape | @{strings} |
"""
if len(patterns) == 0:
return ''
if len(patterns) == 1:
return re.escape(patterns[0])
return [re.escape(p) for p in patterns]
def set_test_message(self, message):
"""Sets message for for the current test.
This is overridden by possible failure message, except when this keyword
is used in test case teardown. In test case teardown this overrides
messages even for failed tests.
This keyword can not be used in suite setup or suite teardown.
"""
if not isinstance(message, unicode):
message = utils.unic(message)
test = self._namespace.test
if not test:
raise RuntimeError("'Set Test Message' keyword cannot be used in "
"suite setup or teardown")
test.message = message
self.log('Set test message to:\n%s' % message)
def set_test_documentation(self, doc):
"""Sets documentation for for the current test.
The current documentation is available from built-in variable
${TEST DOCUMENTATION}. This keyword can not be used in suite
setup or suite teardown.
New in Robot Framework 2.7.
"""
if not isinstance(doc, unicode):
doc = utils.unic(doc)
test = self._namespace.test
if not test:
raise RuntimeError("'Set Test Documentation' keyword cannot be used in "
"suite setup or teardown")
test.doc = doc
self._namespace.variables.set_test('${TEST_DOCUMENTATION}', test.doc)
self.log('Set test documentation to:\n%s' % doc)
def set_suite_documentation(self, doc):
"""Sets documentation for for the current suite.
The current documentation is available from built-in variable
${SUITE DOCUMENTATION}.
New in Robot Framework 2.7.
"""
if not isinstance(doc, unicode):
doc = utils.unic(doc)
suite = self._namespace.suite
suite.doc = doc
self._namespace.variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc)
self.log('Set suite documentation to:\n%s' % doc)
def set_tags(self, *tags):
"""Adds given `tags` for the current test or all tests in a suite.
When this keyword is used inside a test case, that test gets
the specified tags and other tests are not affected.
If this keyword is used in a suite setup, all test cases in
        that suite, recursively, get the given tags. It is a failure
to use this keyword in a suite teardown.
See `Remove Tags` for another keyword to modify tags at test
execution time.
"""
tags = utils.normalize_tags(tags)
handler = lambda test: utils.normalize_tags(test.tags + tags)
self._set_or_remove_tags(handler)
self.log('Set tag%s %s.' % (utils.plural_or_not(tags),
utils.seq2str(tags)))
def remove_tags(self, *tags):
"""Removes given `tags` from the current test or all tests in a suite.
Tags can be given exactly or using a pattern where '*' matches
anything and '?' matches one character.
This keyword can affect either one test case or all test cases in a
test suite similarly as `Set Tags` keyword.
Example:
| Remove Tags | mytag | something-* | ?ython |
"""
tags = TagPatterns(tags)
handler = lambda test: [t for t in test.tags if not tags.match(t)]
self._set_or_remove_tags(handler)
self.log('Removed tag%s %s.' % (utils.plural_or_not(tags),
utils.seq2str(tags)))
def _set_or_remove_tags(self, handler, suite=None, test=None):
if not (suite or test):
ns = self._namespace
if ns.test is None:
if ns.suite.status != 'RUNNING':
raise RuntimeError("'Set Tags' and 'Remove Tags' keywords "
"cannot be used in suite teardown.")
self._set_or_remove_tags(handler, suite=ns.suite)
else:
self._set_or_remove_tags(handler, test=ns.test)
ns.variables.set_test('@{TEST_TAGS}', ns.test.tags)
ns.suite._set_critical_tags(ns.suite.critical)
elif suite:
for sub in suite.suites:
self._set_or_remove_tags(handler, suite=sub)
for test in suite.tests:
self._set_or_remove_tags(handler, test=test)
else:
test.tags = handler(test)
def get_library_instance(self, name):
"""Returns the currently active instance of the specified test library.
This keyword makes it easy for test libraries to interact with
other test libraries that have state. This is illustrated by
the Python example below:
| from robot.libraries.BuiltIn import BuiltIn
|
| def title_should_start_with(expected):
| seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
| title = seleniumlib.get_title()
| if not title.startswith(expected):
| raise AssertionError("Title '%s' did not start with '%s'"
| % (title, expected))
It is also possible to use this keyword in the test data and
pass the returned library instance to another keyword. If a
library is imported with a custom name, the `name` used to get
the instance must be that name and not the original library name.
"""
try:
return self._namespace.get_library_instance(name)
except DataError, err:
raise RuntimeError(unicode(err))
class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Misc):
"""An always available standard library with often needed keywords.
`BuiltIn` is Robot Framework's standard library that provides a set
of generic keywords needed often. It is imported automatically and
thus always available. The provided keywords can be used, for example,
for verifications (e.g. `Should Be Equal`, `Should Contain`),
conversions (e.g. `Convert To Integer`) and for various other purposes
(e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`).
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
@property
def _execution_context(self):
return EXECUTION_CONTEXTS.current
@property
def _namespace(self):
return self._execution_context.namespace
@property
def _variables(self):
return self._namespace.variables
def _matches(self, string, pattern):
# Must use this instead of fnmatch when string may contain newlines.
return utils.matches(string, pattern, caseless=False, spaceless=False)
def _is_true(self, condition):
if isinstance(condition, basestring):
try:
condition = eval(condition)
except:
raise RuntimeError("Evaluating condition '%s' failed: %s"
% (condition, utils.get_error_message()))
return bool(condition)
def register_run_keyword(library, keyword, args_to_process=None):
"""Registers 'run keyword' so that its arguments can be handled correctly.
1) Why is this method needed
Keywords running other keywords internally (normally using `Run Keyword`
or some variants of it in BuiltIn) must have the arguments meant to the
internally executed keyword handled specially to prevent processing them
twice. This is done ONLY for keywords registered using this method.
    If the registered keyword has the same name as a keyword from the Robot
    Framework standard libraries, it can be used without getting warnings.
    Normally there is a warning in such cases unless the keyword is used in
    the long format (e.g. MyLib.Keyword).
Starting from Robot Framework 2.5.2, keywords executed by registered run
    keywords can be tested with the dryrun runmode with the following limitations:
    - The registered keyword must have a 'name' argument which takes the
      keyword's name, or a '*names' argument which takes the keywords' names.
    - The keyword name must not contain variables.
2) How to use this method
`library` is the name of the library where the registered keyword is
implemented.
`keyword` can be either a function or method implementing the
keyword, or name of the implemented keyword as a string.
`args_to_process` is needed when `keyword` is given as a string, and it
defines how many of the arguments to the registered keyword must be
processed normally. When `keyword` is a method or function, this
    information is obtained directly from it so that varargs (those specified with
syntax '*args') are not processed but others are.
3) Examples
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
def my_run_keyword(name, *args):
# do something
return BuiltIn().run_keyword(name, *args)
# Either one of these works
register_run_keyword(__name__, my_run_keyword)
register_run_keyword(__name__, 'My Run Keyword', 1)
-------------
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
class MyLibrary:
def my_run_keyword_if(self, expression, name, *args):
# do something
return BuiltIn().run_keyword_if(expression, name, *args)
# Either one of these works
register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if)
register_run_keyword('MyLibrary', 'my_run_keyword_if', 2)
"""
RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process)
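# The loops below use the mechanism documented above to register all public
# _RunKeyword methods, plus a few BuiltIn keywords none of whose arguments
# should be processed normally (args_to_process=0).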
for name in [attr for attr in dir(_RunKeyword) if not attr.startswith('_')]:
register_run_keyword('BuiltIn', getattr(_RunKeyword, name))
for name in ['set_test_variable', 'set_suite_variable', 'set_global_variable',
'variable_should_exist', 'variable_should_not_exist', 'comment',
'get_variable_value']:
register_run_keyword('BuiltIn', name, 0)
del name, attr
| apache-2.0 | -6,076,869,262,917,258,000 | 41.971555 | 94 | 0.605439 | false |
wintermind/pypedal | PyPedal/examples/sets/sets.py | 1 | 1026 | from PyPedal import *
import copy
def main():
options1 = {
'pedname': 'Fake Pedigree 1',
'messages': 'verbose',
'renumber': 1,
'pedfile': 'set1.ped',
'pedformat': 'asd',
'debug_messages': True,
}
options2 = copy.copy(options1)
options2['pedname'] = 'Fake Pedigree 2'
options2['pedfile'] = 'set2.ped'
set1 = pyp_newclasses.loadPedigree(options1, debugLoad=True)
print 'Animals in set1.ped:'
print set1.idmap.keys()
set2 = pyp_newclasses.loadPedigree(options2, debugLoad=True)
print 'Animals in set2.ped:'
print set2.idmap.keys()
print 'Testing the "+" operator...'
added = set1 + set2
print added.idmap.keys()
print '='*80
options3 = copy.copy(options1)
options3['pedname'] = 'Fake Pedigree 3'
options3['pedfile'] = 'set3.ped'
set3 = pyp_newclasses.loadPedigree(options3, debugLoad=True)
print 'Animals in set3.ped:'
print set3.idmap.keys()
print 'Testing the "+" operator...'
added2 = set1 + set3
print added2.idmap.keys()
if __name__ == '__main__':
main()
| gpl-2.0 | 8,673,712,461,370,547,000 | 21.8 | 68 | 0.659844 | false |
lotcom/automateBoringstuffPython | Chapter13PracBruteForcePDF.py | 1 | 1031 | #! /usr/bin/env python3
# Chapter 13 Practice Brute-Force PDF Password Breaker
# USAGE: Change the pdfFile variable below and run the script to try 44,000 English words
# from the dictionary.txt file to decrypt the encrypted PDF.
import PyPDF2
pdfFile = open('bruteForce.pdf', 'rb') #Change this file name and location
pdfReader = PyPDF2.PdfFileReader(pdfFile)
dictionaryFile = open('dictionary.txt')
passwordList = dictionaryFile.readlines()
for word in range(len(passwordList)):
passWord = passwordList[word].strip()
passWorkedUpper = pdfReader.decrypt(passWord.upper())
if passWorkedUpper == 1:
print('The password is: ' + passWord.upper())
break
else:
print(passWord.upper() + ' did NOT work...')
passWorkedLower = pdfReader.decrypt(passWord.lower())
if passWorkedLower == 1:
print('The password is: ' + passWord.lower())
break
else:
print(passWord.lower() + ' did NOT work...')
dictionaryFile.close()
pdfFile.close()
| cc0-1.0 | -7,848,625,733,136,509,000 | 31.21875 | 88 | 0.675073 | false |
MaxTyutyunnikov/lino | obsolete/tests/27.py | 1 | 3813 | # coding: latin1
## Copyright 2005 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import codecs
from lino.adamo.dbds.sqlite_dbd import sqlite
#import pysqlite2.dbapi2 as sqlite
from unittest import TestCase, main
filename=os.path.join(os.path.dirname(__file__),"27b.sql")
class Case(TestCase):
def test01(self):
conn = sqlite.connect(':memory:')
csr = conn.cursor()
f=codecs.open(filename,encoding="cp1252")
sql=""
lengths=[]
inserts=0
for ln in f:
ln=ln.strip()
if not ln.startswith('#'):
if ln.endswith(";"):
sql += ln[:-1]
csr.execute(sql)
#conn.commit()
#print sql
#print
if sql.startswith("SELECT "):
# use the cursor up to avoid work around
# pysqlite bug
#for t in csr:
# print t
lengths.append(len(csr.fetchall()))
#print "--> %d rows" % len(csr.fetchall())
elif sql.startswith("INSERT "):
inserts+=1
csr.close()
#else:
# conn.commit()
# print "(conn.commit())"
sql=""
else:
sql+=ln
conn.close()
#print lengths
#print "%d INSERT statements" % inserts
## self.assertEqual(lengths,
## [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
## 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
## 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 7])
self.assertEqual(
lengths,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 15, 1, 5]
)
self.assertEqual(inserts,5191)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,525,608,813,913,019,000 | 33.981651 | 73 | 0.419093 | false |
GregDMeyer/dynamite | dynamite/subspaces.py | 1 | 10964 | '''
Classes that define the various subspaces on which operators can be defined.
The methods generally are just an interface to the backend, so that there is only
one implementation of each of the functions.
'''
import numpy as np
from copy import deepcopy
from zlib import crc32
from . import validate, states
from ._backend import bsubspace
class Subspace:
'''
Base subspace class.
'''
def __init__(self):
self._L = None
self._chksum = None
def __eq__(self, s):
'''
Returns true if the two subspaces correspond to the same mapping, even if they
are different classes.
'''
if not isinstance(s, Subspace):
raise ValueError('Cannot compare Subspace to non-Subspace type')
if self.get_dimension() != s.get_dimension():
return False
return self.get_checksum() == s.get_checksum()
@property
def L(self):
'''
The spin chain length corresponding to this space.
'''
return self._L
def check_L(self, value):
# by default, any L that passes our normal validation checks works
return value
@L.setter
def L(self, value):
# check that this value of L is compatible with the subspace
value = validate.L(value)
value = self.check_L(value)
if value != self._L:
self._chksum = None
self._L = value
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
raise NotImplementedError()
@classmethod
def _numeric_to_array(cls, x):
'''
Convert numeric values of any type to the type expected by the backend
functions.
'''
x = np.array(x, copy = False, dtype = bsubspace.dnm_int_t).reshape((-1,))
return np.ascontiguousarray(x)
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
raise NotImplementedError()
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
raise NotImplementedError()
def copy(self):
return deepcopy(self)
def get_checksum(self):
'''
Get a checksum of the state mapping for this subspace. This allows subspaces to
be compared quickly.
'''
if self._chksum is None:
BLOCK = 2**14
chksum = 0
for start in range(0, self.get_dimension(), BLOCK):
stop = min(start+BLOCK, self.get_dimension())
smap = self.idx_to_state(np.arange(start, stop))
chksum = crc32(smap, chksum)
self._chksum = chksum
return self._chksum
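    # Illustrative sketch (not original code): the incremental block-wise
    # CRC32 pattern used above, applied to an arbitrary byte buffer:
    #
    #     from zlib import crc32
    #     def blocked_crc32(data, block=2**14):
    #         chksum = 0
    #         for start in range(0, len(data), block):
    #             chksum = crc32(data[start:start + block], chksum)
    #         return chksum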
def __hash__(self):
return self.get_checksum()
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the backend C.
'''
raise NotImplementedError()
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
raise NotImplementedError()
class Full(Subspace):
def __init__(self):
Subspace.__init__(self)
# Full is a special case
def __eq__(self, s):
if isinstance(s, Full):
return s.L == self.L
return Subspace.__eq__(self, s)
# overriding __eq__ causes this to get unset. :(
__hash__ = Subspace.__hash__
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return self._get_dimension(self.L)
@classmethod
def _get_dimension(cls, L):
return bsubspace.get_dimension_Full(cls._get_cdata(L))
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
return self._idx_to_state(idx, self.L)
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
return self._state_to_idx(state, self.L)
@classmethod
def _idx_to_state(cls, idx, L):
idx = cls._numeric_to_array(idx)
return bsubspace.idx_to_state_Full(idx, cls._get_cdata(L))
@classmethod
def _state_to_idx(cls, state, L):
state = cls._numeric_to_array(state)
return bsubspace.state_to_idx_Full(state, cls._get_cdata(L))
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return self._get_cdata(self.L)
@classmethod
def _get_cdata(cls, L):
return bsubspace.CFull(L)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.FULL
class Parity(Subspace):
'''
The subspaces of states in which the number of up spins is even or odd.
Parameters
----------
space : int
Either 0 or 'even' for the even subspace, or 1 or 'odd' for the other.
'''
def __init__(self, space):
Subspace.__init__(self)
self._space = self._check_space(space)
@property
def space(self):
return self._space
@classmethod
def _check_space(cls, value):
if value in [0,'even']:
return 0
elif value in [1,'odd']:
return 1
else:
raise ValueError('Invalid parity space "'+str(value)+'" '
'(valid choices are 0, 1, "even", or "odd")')
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return self._get_dimension(self.L, self.space)
@classmethod
def _get_dimension(cls, L, space):
return bsubspace.get_dimension_Parity(cls._get_cdata(L, space))
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
idx = self._numeric_to_array(idx)
return self._idx_to_state(idx, self.L, self.space)
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
state = self._numeric_to_array(state)
return self._state_to_idx(state, self.L, self.space)
@classmethod
def _idx_to_state(cls, idx, L, space):
return bsubspace.idx_to_state_Parity(idx, cls._get_cdata(L, space))
@classmethod
def _state_to_idx(cls, state, L, space):
return bsubspace.state_to_idx_Parity(state, cls._get_cdata(L, space))
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return self._get_cdata(self.L, self.space)
@classmethod
def _get_cdata(cls, L, space):
return bsubspace.CParity(L, space)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.PARITY
class Auto(Subspace):
'''
Automatically generate a mapping that takes advantage of any possible spin conservation
law, by performing a breadth-first search of the graph of possible states using the operator
as an adjacency matrix. The subspace is defined by providing a "start" state; the returned
subspace will be whatever subspace contains that state.
    Currently the actual computation of the ordering can only occur on process 0, limiting
the scalability of this subspace.
Parameters
----------
H : dynamite.operators.Operator
The operator for which this custom subspace will be defined.
state : int or string
An integer whose binary representation corresponds to the spin configuration of the "start"
        state mentioned above, or a string representing the same. See
:meth:`dynamite.states.State.str_to_state` for more information.
size_guess : int
A guess for the dimension of the subspace. By default, memory is allocated for the full
space, and then trimmed off if not used.
sort : bool
Whether to reorder the mapping after computing it. In some cases this may
cause a speedup.
'''
def __init__(self, H, state, size_guess=None, sort=True):
Subspace.__init__(self)
self._L = H.get_length()
self.state = states.State.str_to_state(state, self.L)
if size_guess is None:
size_guess = 2**H.get_length()
self.state_map = np.ndarray((size_guess,), dtype=bsubspace.dnm_int_t)
H.reduce_msc()
dim = bsubspace.compute_rcm(H.msc['masks'], H.msc['signs'], H.msc['coeffs'],
self.state_map, self.state, H.get_length())
self.state_map = self.state_map[:dim]
self.rmap_indices = np.argsort(self.state_map).astype(bsubspace.dnm_int_t, copy=False)
self.rmap_states = self.state_map[self.rmap_indices]
if sort:
self.state_map = self.rmap_states
self.rmap_indices = np.arange(self.state_map.size, dtype=bsubspace.dnm_int_t)
def check_L(self, value):
if value != self.L:
raise TypeError('Cannot change L for Auto subspace type.')
return value
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return bsubspace.get_dimension_Auto(self.get_cdata())
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
idx = self._numeric_to_array(idx)
return bsubspace.idx_to_state_Auto(idx, self.get_cdata())
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
state = self._numeric_to_array(state)
return bsubspace.state_to_idx_Auto(state, self.get_cdata())
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return bsubspace.CAuto(
self.L,
np.ascontiguousarray(self.state_map),
np.ascontiguousarray(self.rmap_indices),
np.ascontiguousarray(self.rmap_states)
)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.AUTO
| mit | -2,831,508,411,975,684,600 | 29.287293 | 99 | 0.598231 | false |
ccpgames/eve-metrics | web2py/applications/welcome/languages/hu.py | 1 | 6444 | # coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| mit | 480,198,011,037,141,200 | 37.179012 | 281 | 0.661415 | false |
shincling/MemNN_and_Varieties | DataCoupus/list_document/timelist_answer.py | 1 | 1930 | # -*- coding: utf8 -*-
__author__ = 'shin'
import jieba
timelist_answer=[]
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('时间是明天。')
timelist_answer.append('帮我预订明天的机票。')
timelist_answer.append('出行时间是明天。')
timelist_answer.append('订明天的机票。')
timelist_answer.append('明天走。')
timelist_answer.append('明天出发。')
timelist_answer.append('明天之前。')
timelist_answer.append('在明天出发就行。')
timelist_answer.append('需要明天出发。')
timelist_answer.append('我要订明天的飞机。')
timelist_answer.append('订购明天的机票。')
timelist_answer.append('出行时间应该是明天。')
timelist_answer.append('于明天出行。')
timelist_answer.append('在明天走。')
timelist_answer.append('明天出发')
timelist_answer.append('明天走')
timelist_answer.append('出发时间明天')
timelist_answer.append('时间明天')
timelist_answer.append('我打算明天出发')
timelist_answer.append('我想明天出发')
timelist_answer.append('明天出发的票')
timelist_answer.append('明天出发的机票')
timelist_answer.append('明天走的票')
timelist_answer.append('明天走的机票')
timelist_answer.append('明天的机票')
timelist_answer.append('明天的票')
timelist_answer_cut=[]
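# Segment each answer with jieba, join the tokens with spaces, and replace the
# concrete time word '明天' ("tomorrow") with the generic slot token [slot_time].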
for ans in timelist_answer:
    w_sent = ''
    # jieba.lcut is the public segmentation API (the original called the
    # private jieba._lcut, which is not part of jieba's documented interface)
    sent = jieba.lcut(ans)
    for word in sent:
        w_sent += ' '
        w_sent += word
    w_sent += '\n'
    w_sent = w_sent.replace('明天'.decode('utf8'), '[slot_time]')
    timelist_answer_cut.append(w_sent)
pass | bsd-3-clause | 2,609,520,684,569,583,000 | 25.288136 | 60 | 0.72 | false |
nickmarton/Vivid | vivid/classes/test_classes/test_AttributeInterpretation.py | 1 | 17074 | """Attribute Interpretation unit tests."""
import pytest
from vivid.classes.attribute import Attribute
from vivid.classes.relation import Relation
from vivid.classes.attribute_structure import AttributeStructure
from vivid.classes.attribute_system import AttributeSystem
from vivid.classes.relation_symbol import RelationSymbol
from vivid.classes.vocabulary import Vocabulary
from vivid.classes.attribute_interpretation import AttributeInterpretation
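# Each test below builds the same small "clock" fixture: 'hour'/'minute'
# Attributes, Relations R1-R4 (ahead/behind/PM/AM) collected into an
# AttributeStructure, a Vocabulary whose RelationSymbols map onto those
# Relations, and profiles binding each symbol's arguments to
# (attribute, object-index) pairs.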
def test___init__():
"""Test AttributeInterpretation constructor."""
def test_TypeError(vocabulary, attribute_structure, mapping, profiles):
"""Test TypeError catching in AttributeInterpretation constructor."""
with pytest.raises(TypeError) as excinfo:
AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
def test_ValueError(vocabulary, attribute_structure, mapping, profiles):
"""Test ValueError catching in AttributeInterpretation constructor."""
with pytest.raises(ValueError) as excinfo:
AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]]
bad_profiles = [
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
bad_source_mapping = {ahead_rs: 1, "behind_rs": 2, pm_rs: 3}
bad_target_mapping = {ahead_rs: 1, behind_rs: 'R2', pm_rs: 3}
bad_target_mapping2 = {ahead_rs: 1, behind_rs: 2.0, pm_rs: 3}
dup_subscr_mapping = {ahead_rs: 2, behind_rs: 2, pm_rs: 3}
ai = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
test_TypeError(None, attribute_structure, mapping, profiles)
test_TypeError(object, attribute_structure, mapping, profiles)
test_TypeError(vocabulary, None, mapping, profiles)
test_TypeError(vocabulary, object, mapping, profiles)
test_TypeError(vocabulary, AttributeSystem(attribute_structure, ['o']),
mapping, profiles)
test_TypeError(vocabulary, attribute_structure, None, profiles)
test_TypeError(vocabulary, attribute_structure, object, profiles)
test_TypeError(vocabulary, attribute_structure, mapping, None)
test_TypeError(vocabulary, attribute_structure, mapping, object)
test_ValueError(
vocabulary, attribute_structure, bad_source_mapping, profiles)
test_ValueError(
vocabulary, attribute_structure, bad_target_mapping, profiles)
test_ValueError(
vocabulary, attribute_structure, bad_target_mapping2, profiles)
test_ValueError(vocabulary, attribute_structure, mapping, bad_profiles)
test_ValueError(
vocabulary, attribute_structure, dup_subscr_mapping, profiles)
bad_mapping = {RelationSymbol("not in Vocabulary or profiles", 2): 1,
behind_rs: 2, pm_rs: 3}
test_ValueError(vocabulary, attribute_structure, bad_mapping, profiles)
bad_vocabulary = Vocabulary(['C1', 'C2'],
[ahead_rs, behind_rs, pm_rs,
RelationSymbol("not in source/profiles", 2)],
['V1', 'V2'])
test_ValueError(bad_vocabulary, attribute_structure, mapping, profiles)
bad_target_mapping = {ahead_rs: 1, behind_rs: 6, pm_rs: 3}
test_ValueError(
vocabulary, attribute_structure, bad_target_mapping, profiles)
bad_profiles = [
[ahead_rs, ('doesn\'t exist', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]]
test_ValueError(vocabulary, attribute_structure, mapping, bad_profiles)
bad_profiles = [
[ahead_rs, ('hour', 10), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]]
test_ValueError(vocabulary, attribute_structure, mapping, bad_profiles)
AI = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
assert AI._attribute_structure == attribute_structure
assert AI._attribute_structure is not attribute_structure
assert AI._vocabulary == vocabulary
assert AI._vocabulary is vocabulary
AI._vocabulary.add_constant('cx')
AI._vocabulary.add_variable('vx')
assert 'cx' in vocabulary._C
assert 'vx' in vocabulary._V
vocabulary.add_constant('cx2')
vocabulary.add_variable('vx2')
assert 'cx2' in AI._vocabulary._C
assert 'vx2' in AI._vocabulary._V
def test___eq__():
"""Test == operator for AttributeInterpretation object."""
def test_TypeError(ai1, ai2):
"""Test TypeError in == operator for AttributeInterpretation."""
with pytest.raises(TypeError) as excinfo:
ai1 == ai2
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
profiles2 = [
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)],
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]
]
profiles3 = [
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[ahead_rs, ('hour', 1)],
[pm_rs, ('hour', 1), ('minute', 1), ('hour', 1), ('minute', 1)]
]
profiles4 = [
[ahead_rs, ('hour', 2), ('minute', 2), ('hour', 1), ('minute', 1)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
profiles5 = [
[ahead_rs, ('minute', 1), ('hour', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai1 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
ai2 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles2)
ai3 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles3)
ai4 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles4)
ai5 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles5)
assert ai1 == ai1
assert ai1 == ai2
assert not ai1 == ai3
assert not ai1 == ai4
assert not ai1 == ai5
def test___ne__():
"""Test != operator for AttributeInterpretation object."""
def test_TypeError(ai1, ai2):
"""Test TypeError in != operator for AttributeInterpretation."""
with pytest.raises(TypeError) as excinfo:
            ai1 != ai2
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
profiles2 = [
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)],
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]
]
profiles3 = [
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[ahead_rs, ('hour', 1)],
[pm_rs, ('hour', 1), ('minute', 1), ('hour', 1), ('minute', 1)]
]
profiles4 = [
[ahead_rs, ('hour', 2), ('minute', 2), ('hour', 1), ('minute', 1)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
profiles5 = [
[ahead_rs, ('minute', 1), ('hour', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai1 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
ai2 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles2)
ai3 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles3)
ai4 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles4)
ai5 = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles5)
assert not ai1 != ai1
assert not ai1 != ai2
assert ai1 != ai3
assert ai1 != ai4
assert ai1 != ai5
def test___deepcopy__():
"""Test copy.deepcopy for AttributeInterpretation object."""
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
from copy import deepcopy
ai_copy = deepcopy(ai)
assert ai == ai_copy
assert ai is not ai_copy
assert ai._vocabulary is ai_copy._vocabulary
assert ai._attribute_structure is not ai_copy._attribute_structure
assert ai._mapping is not ai_copy._mapping
assert ai._profiles is not ai_copy._profiles
assert ai._table is not ai_copy._table
assert ai._relation_symbols is not ai_copy._relation_symbols
def test___iter__():
"""Test AttributeInterpretation iterator."""
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
assert ai._table == [entry for entry in ai]
assert ai._table == [entry for entry in iter(ai)]
def test___str__():
"""Test str(AttributeInterpretation)."""
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
assert str(ai) == "[Ahead, 4, 'R1', [('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]\n" + \
"[Behind, 4, 'R2', [('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]\n" + \
"[PM, 1, 'R3', [('hour', 1)]]"
def test___repr__():
"""Test repr(AttributeInterpretation)."""
a = Attribute('hour', ['0,...,23'])
a2 = Attribute('minute', ['0,...,59'])
r_ahead = Relation('R1(h1,m1,h2,m2) <=> h1 > h2 or (h1 = h2 and m1 > m2)',
['hour', 'minute', 'hour', 'minute'], 1)
r_behind = Relation('R2(h1,m1,h2,m2) <=> h1 < h2 or (h1 = h2 and m1 < m2)',
['hour', 'minute', 'hour', 'minute'], 2)
r_pm = Relation('R3(h1) <=> h1 > 12', ['hour'], 3)
r_am = Relation('R4(h1) <=> h1 < 12', ['hour'], 4)
attribute_structure = AttributeStructure(
a, a2, r_ahead, r_behind, r_pm, r_am)
ahead_rs = RelationSymbol('Ahead', 4)
behind_rs = RelationSymbol('Behind', 4)
pm_rs = RelationSymbol('PM', 1)
vocabulary = Vocabulary(
['C1', 'C2'], [ahead_rs, behind_rs, pm_rs], ['V1', 'V2'])
profiles = [
[ahead_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[behind_rs, ('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)],
[pm_rs, ('hour', 1)]
]
mapping = {ahead_rs: 1, behind_rs: 2, pm_rs: 3}
ai = AttributeInterpretation(
vocabulary, attribute_structure, mapping, profiles)
assert repr(ai) == "[Ahead, 4, 'R1', [('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]\n" + \
"[Behind, 4, 'R2', [('hour', 1), ('minute', 1), ('hour', 2), ('minute', 2)]]\n" + \
"[PM, 1, 'R3', [('hour', 1)]]"
| mit | 8,102,723,487,795,146,000 | 41.262376 | 106 | 0.549608 | false |
endlessm/chromium-browser | third_party/angle/scripts/generate_stats.py | 4 | 33713 | #!/usr/bin/env vpython
#
# [VPYTHON:BEGIN]
# wheel: <
# name: "infra/python/wheels/google-auth-py2_py3"
# version: "version:1.2.1"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1-py2_py3"
# version: "version:0.4.5"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1_modules-py2_py3"
# version: "version:0.2.4"
# >
#
# wheel: <
# name: "infra/python/wheels/six"
# version: "version:1.10.0"
# >
#
# wheel: <
# name: "infra/python/wheels/cachetools-py2_py3"
# version: "version:2.0.1"
# >
# wheel: <
# name: "infra/python/wheels/rsa-py2_py3"
# version: "version:4.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests"
# version: "version:2.13.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-api-python-client-py2_py3"
# version: "version:1.6.2"
# >
#
# wheel: <
# name: "infra/python/wheels/httplib2-py2_py3"
# version: "version:0.12.1"
# >
#
# wheel: <
# name: "infra/python/wheels/oauth2client-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/uritemplate-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-oauthlib-py2_py3"
# version: "version:0.3.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests-oauthlib-py2_py3"
# version: "version:1.2.0"
# >
#
# wheel: <
# name: "infra/python/wheels/oauthlib-py2_py3"
# version: "version:3.0.1"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-httplib2-py2_py3"
# version: "version:0.0.3"
# >
# [VPYTHON:END]
#
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_stats.py:
# Checks output of deqp testers and generates stats using the GDocs API
#
# prerequisites:
# https://devsite.googleplex.com/sheets/api/quickstart/python
# Follow the quickstart guide.
#
# usage: generate_stats.py [-h] [--auth_path [AUTH_PATH]] [--spreadsheet [SPREADSHEET]]
# [--verbosity [VERBOSITY]]
#
# optional arguments:
# -h, --help show this help message and exit
# --auth_path [AUTH_PATH]
# path to directory containing authorization data (credentials.json and
# token.pickle). [default=<home>/.auth]
# --spreadsheet [SPREADSHEET]
# ID of the spreadsheet to write stats to. [default
#        ='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek']
# --verbosity [VERBOSITY]
# Verbosity of output. Valid options are [DEBUG, INFO, WARNING, ERROR].
# [default=INFO]
import argparse
import datetime
import logging
import os
import pickle
import re
import subprocess
import sys
import urllib
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
####################
# Global Constants #
####################
HOME_DIR = os.path.expanduser('~')
SCRIPT_DIR = sys.path[0]
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..'))
LOGGER = logging.getLogger('generate_stats')
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
BOT_NAMES = [
'Win10 FYI x64 dEQP Release (NVIDIA)',
'Win10 FYI x64 dEQP Release (Intel HD 630)',
'Win7 FYI dEQP Release (AMD)',
'Win7 FYI x64 dEQP Release (NVIDIA)',
'Mac FYI dEQP Release Intel',
'Mac FYI dEQP Release AMD',
'Linux FYI dEQP Release (Intel HD 630)',
'Linux FYI dEQP Release (NVIDIA)',
'Android FYI dEQP Release (Nexus 5X)',
'Android FYI 32 dEQP Vk Release (Pixel 2)',
'Android FYI 64 dEQP Vk Release (Pixel 2)',
]
BOT_NAME_PREFIX = 'chromium/ci/'
BUILD_LINK_PREFIX = 'https://ci.chromium.org/p/chromium/builders/ci/'
REQUIRED_COLUMNS = ['build_link', 'time', 'date', 'revision', 'angle_revision', 'duplicate']
MAIN_RESULT_COLUMNS = ['Passed', 'Failed', 'Skipped', 'Not Supported', 'Exception', 'Crashed']
INFO_TAG = '*RESULT'
WORKAROUND_FORMATTING_ERROR_STRING = "Still waiting for the following processes to finish:"
######################
# Build Info Parsing #
######################
# Returns a struct with info about the latest successful build given a bot name. Info contains the
# build_name, time, date, angle_revision, and chrome revision.
# Uses: bb ls '<botname>' -n 1 -status success -p
def get_latest_success_build_info(bot_name):
bb = subprocess.Popen(['bb', 'ls', bot_name, '-n', '1', '-status', 'success', '-p'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb ls '" + bot_name + "' -n 1 -status success -p]")
out, err = bb.communicate()
if err:
raise ValueError("Unexpected error from bb ls: '" + err + "'")
if not out:
raise ValueError("Unexpected empty result from bb ls of bot '" + bot_name + "'")
# Example output (line 1):
# ci.chromium.org/b/8915280275579996928 SUCCESS 'chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877'
# ...
if 'SUCCESS' not in out:
raise ValueError("Unexpected result from bb ls: '" + out + "'")
info = {}
for line in out.splitlines():
# The first line holds the build name
if 'build_name' not in info:
info['build_name'] = line.strip().split("'")[1]
# Remove the bot name and prepend the build link
info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
info['build_name'].split(BOT_NAME_PREFIX)[1])
if 'Created' in line:
# Example output of line with 'Created':
# ...
# Created today at 12:26:39, waited 2.056319s, started at 12:26:41, ran for 1h16m48.14963s, ended at 13:43:30
# ...
info['time'] = re.findall(r'[0-9]{1,2}:[0-9]{2}:[0-9]{2}', line.split(',', 1)[0])[0]
# Format today's date in US format so Sheets can read it properly
info['date'] = datetime.datetime.now().strftime('%m/%d/%y')
if 'got_angle_revision' in line:
# Example output of line with angle revision:
# ...
# "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
# ...
info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
if '"revision"' in line:
# Example output of line with chromium revision:
# ...
# "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
# ...
info['revision'] = filter(str.isalnum, line.split(':')[1])
if 'build_name' not in info:
raise ValueError("Could not find build_name from bot '" + bot_name + "'")
return info
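# Illustrative shape of the struct returned by get_latest_success_build_info
# (all values hypothetical):
#   {'build_name': "chromium/ci/Win10 FYI x64 dEQP Release (NVIDIA)/26877",
#    'build_link': 'https://ci.chromium.org/p/chromium/builders/ci/Win10%20FYI%20x64%20dEQP%20Release%20%28NVIDIA%29/26877',
#    'time': '12:26:39', 'date': '01/01/19',
#    'angle_revision': '8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee',
#    'revision': '3b68405a27f1f9590f83ae07757589dba862f141'}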
# Returns a list of step names that we're interested in given a build name. We are interested in
# step names starting with 'angle_'. May raise an exception.
# Uses: bb get '<build_name>' -steps
def get_step_names(build_name):
bb = subprocess.Popen(['bb', 'get', build_name, '-steps'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb get '" + build_name + "' -steps]")
out, err = bb.communicate()
if err:
raise ValueError("Unexpected error from bb get: '" + err + "'")
step_names = []
# Example output (relevant lines to a single step):
# ...
# Step "angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10" SUCCESS 4m12s Logs: "stdout", "chromium_swarming.summary", "Merge script log", "Flaky failure: dEQP.EGL/info_version (status CRASH,SUCCESS)", "step_metadata"
# Run on OS: 'Windows-10'<br>Max shard duration: 0:04:07.309848 (shard \#1)<br>Min shard duration: 0:02:26.402128 (shard \#0)<br/>flaky failures [ignored]:<br/>dEQP.EGL/info\_version<br/>
# * [shard #0 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=9a5999a59d332e55f54f495948d0c9f959e60ed2)
# * [shard #0 (128.3 sec)](https://chromium-swarm.appspot.com/user/task/446903ae365b8110)
# * [shard #1 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=d71e1bdd91dee61b536b4057a9222e642bd3809f)
# * [shard #1 (229.3 sec)](https://chromium-swarm.appspot.com/user/task/446903b7b0d90210)
# * [shard #2 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=ac9ba85b1cca77774061b87335c077980e1eef85)
# * [shard #2 (144.5 sec)](https://chromium-swarm.appspot.com/user/task/446903c18e15a010)
# * [shard #3 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=976d586386864abecf53915fbac3e085f672e30f)
# * [shard #3 (138.4 sec)](https://chromium-swarm.appspot.com/user/task/446903cc8da0ad10)
# ...
for line in out.splitlines():
if 'Step "angle_' not in line:
continue
step_names.append(line.split('"')[1])
return step_names
# Performs some heuristic validation of the step_info struct returned from a single step log.
# Returns True if valid, False if invalid. May write to stderr
def validate_step_info(step_info, build_name, step_name):
print_name = "'" + build_name + "': '" + step_name + "'"
if not step_info:
LOGGER.warning('Step info empty for ' + print_name + '\n')
return False
if 'Total' in step_info:
partial_sum_keys = MAIN_RESULT_COLUMNS
partial_sum_values = [int(step_info[key]) for key in partial_sum_keys if key in step_info]
computed_total = sum(partial_sum_values)
if step_info['Total'] != computed_total:
LOGGER.warning('Step info does not sum to total for ' + print_name + ' | Total: ' +
str(step_info['Total']) + ' - Computed total: ' + str(computed_total) +
'\n')
return True
# Returns a struct containing parsed info from a given step log. The info is parsed by looking for
# lines with the following format in stdout:
# '*RESULT: <key>: <value>'
# May write to stderr
# Uses: bb log '<build_name>' '<step_name>'
def get_step_info(build_name, step_name):
bb = subprocess.Popen(['bb', 'log', build_name, step_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb log '" + build_name + "' '" + step_name + "']")
out, err = bb.communicate()
if err:
LOGGER.warning("Unexpected error from bb log '" + build_name + "' '" + step_name + "': '" +
err + "'")
return None
step_info = {}
# Example output (relevant lines of stdout):
# ...
# *RESULT: Total: 155
# *RESULT: Passed: 11
# *RESULT: Failed: 0
# *RESULT: Skipped: 12
# *RESULT: Not Supported: 132
# *RESULT: Exception: 0
# *RESULT: Crashed: 0
# *RESULT: Unexpected Passed: 12
# ...
append_errors = []
# Hacky workaround to fix issue where messages are dropped into the middle of lines by another
# process:
# eg.
# *RESULT: <start_of_result>Still waiting for the following processes to finish:
# "c:\b\s\w\ir\out\Release\angle_deqp_gles3_tests.exe" --deqp-egl-display-type=angle-vulkan --gtest_flagfile="c:\b\s\w\itlcgdrz\scoped_dir7104_364984996\8ad93729-f679-406d-973b-06b9d1bf32de.tmp" --single-process-tests --test-launcher-batch-limit=400 --test-launcher-output="c:\b\s\w\itlcgdrz\7104_437216092\test_results.xml" --test-launcher-summary-output="c:\b\s\w\iosuk8ai\output.json"
# <end_of_result>
#
# Removes the message and skips the line following it, and then appends the <start_of_result>
# and <end_of_result> back together
workaround_prev_line = ""
workaround_prev_line_count = 0
for line in out.splitlines():
# Skip lines if the workaround still has lines to skip
if workaround_prev_line_count > 0:
workaround_prev_line_count -= 1
continue
# If there are no more lines to skip and there is a previous <start_of_result> to append,
# append it and finish the workaround
elif workaround_prev_line != "":
line = workaround_prev_line + line
workaround_prev_line = ""
workaround_prev_line_count = 0
LOGGER.debug("Formatting error workaround rebuilt line as: '" + line + "'\n")
if INFO_TAG not in line:
continue
# When the workaround string is detected, start the workaround with 1 line to skip and save
# the <start_of_result>, but continue the loop until the workaround is finished
if WORKAROUND_FORMATTING_ERROR_STRING in line:
workaround_prev_line = line.split(WORKAROUND_FORMATTING_ERROR_STRING)[0]
workaround_prev_line_count = 1
continue
        line_columns = line.split(INFO_TAG, 1)[1].split(':')
        if len(line_columns) != 3:
LOGGER.warning("Line improperly formatted: '" + line + "'\n")
continue
key = line_columns[1].strip()
# If the value is clearly an int, sum it. Otherwise, concatenate it as a string
        isInt = False
        intVal = 0
        try:
            intVal = int(line_columns[2])
            isInt = True
        except ValueError:
            isInt = False
if isInt:
if key not in step_info:
step_info[key] = 0
step_info[key] += intVal
else:
if key not in step_info:
step_info[key] = line_columns[2].strip()
else:
append_string = '\n' + line_columns[2].strip()
# Sheets has a limit of 50000 characters per cell, so make sure to stop appending
# below this limit
if len(step_info[key]) + len(append_string) < 50000:
step_info[key] += append_string
else:
if key not in append_errors:
append_errors.append(key)
LOGGER.warning("Too many characters in column '" + key +
"'. Output capped.")
return step_info
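# Illustrative step_info returned by get_step_info for the stdout excerpt shown
# above (hypothetical run):
#   {'Total': 155, 'Passed': 11, 'Failed': 0, 'Skipped': 12,
#    'Not Supported': 132, 'Exception': 0, 'Crashed': 0, 'Unexpected Passed': 12}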
# Returns the info for each step run on a given bot_name.
def get_bot_info(bot_name):
info = get_latest_success_build_info(bot_name)
info['step_names'] = get_step_names(info['build_name'])
broken_step_names = []
for step_name in info['step_names']:
LOGGER.info("Parsing step '" + step_name + "'...")
step_info = get_step_info(info['build_name'], step_name)
if validate_step_info(step_info, info['build_name'], step_name):
info[step_name] = step_info
else:
            broken_step_names.append(step_name)
for step_name in broken_step_names:
info['step_names'].remove(step_name)
return info
#####################
# Sheets Formatting #
#####################
# Get an individual spreadsheet based on the spreadsheet id. Returns the result of
# spreadsheets.get(), or throws an exception if the sheet could not open.
def get_spreadsheet(service, spreadsheet_id):
LOGGER.debug("Called [spreadsheets.get(spreadsheetId='" + spreadsheet_id + "')]")
request = service.get(spreadsheetId=spreadsheet_id)
spreadsheet = request.execute()
if not spreadsheet:
raise Exception("Did not open spreadsheet '" + spreadsheet_id + "'")
return spreadsheet
# Returns a nicely formatted string based on the bot_name and step_name
def format_sheet_name(bot_name, step_name):
# Some tokens should be ignored for readability in the name
    unnecessary_tokens = ['FYI', 'Release', 'Vk', 'dEQP', '(', ')']
    for token in unnecessary_tokens:
bot_name = bot_name.replace(token, '')
bot_name = ' '.join(bot_name.strip().split()) # Remove extra spaces
step_name = re.findall(r'angle\w*', step_name)[0] # Separate test name
# Test names are formatted as 'angle_deqp_<frontend>_<backend>_tests'
new_step_name = ''
# Put the frontend first
if '_egl_' in step_name:
step_name = step_name.replace('_egl_', '_')
new_step_name += ' EGL'
if '_gles2_' in step_name:
step_name = step_name.replace('_gles2_', '_')
new_step_name += ' GLES 2.0 '
if '_gles3_' in step_name:
step_name = step_name.replace('_gles3_', '_')
new_step_name += ' GLES 3.0 '
if '_gles31_' in step_name:
step_name = step_name.replace('_gles31_', '_')
new_step_name += ' GLES 3.1 '
# Put the backend second
if '_d3d9_' in step_name:
step_name = step_name.replace('_d3d9_', '_')
new_step_name += ' D3D9 '
if '_d3d11' in step_name:
step_name = step_name.replace('_d3d11_', '_')
new_step_name += ' D3D11 '
if '_gl_' in step_name:
step_name = step_name.replace('_gl_', '_')
new_step_name += ' Desktop OpenGL '
if '_gles_' in step_name:
step_name = step_name.replace('_gles_', '_')
new_step_name += ' OpenGLES '
if '_vulkan_' in step_name:
step_name = step_name.replace('_vulkan_', '_')
new_step_name += ' Vulkan '
# Add any remaining keywords from the step name into the formatted name (formatted nicely)
step_name = step_name.replace('angle_', '_')
step_name = step_name.replace('_deqp_', '_')
step_name = step_name.replace('_tests', '_')
step_name = step_name.replace('_', ' ').strip()
new_step_name += ' ' + step_name
new_step_name = ' '.join(new_step_name.strip().split()) # Remove extra spaces
return new_step_name + ' ' + bot_name
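# Illustrative result of format_sheet_name (hypothetical inputs):
#   format_sheet_name('Win10 FYI x64 dEQP Release (NVIDIA)',
#                     'angle_deqp_gles3_vulkan_tests on (...) GPU on Windows')
#   -> 'GLES 3.0 Vulkan Win10 x64 NVIDIA'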
# Returns the full list of sheet names that should be populated based on the info struct
def get_sheet_names(info):
sheet_names = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
sheet_names.append(sheet_name)
return sheet_names
# Returns True if the sheet is found in the spreadsheets object
def sheet_exists(spreadsheet, step_name):
for sheet in spreadsheet['sheets']:
if sheet['properties']['title'] == step_name:
return True
return False
# Validates the spreadsheets object against the list of sheet names which should appear. Returns a
# list of sheets that need creation.
def validate_sheets(spreadsheet, sheet_names):
create_sheets = []
for sheet_name in sheet_names:
if not sheet_exists(spreadsheet, sheet_name):
create_sheets.append(sheet_name)
return create_sheets
# Performs a batch update with a given service, spreadsheet id, and list <object(Request)> of
# updates to do.
def batch_update(service, spreadsheet_id, updates):
batch_update_request_body = {
'requests': updates,
}
LOGGER.debug("Called [spreadsheets.batchUpdate(spreadsheetId='" + spreadsheet_id + "', body=" +
str(batch_update_request_body) + ')]')
request = service.batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_request_body)
request.execute()
# Creates sheets given a service and spreadsheed id based on a list of sheet names input
def create_sheets(service, spreadsheet_id, sheet_names):
updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
batch_update(service, spreadsheet_id, updates)
# Calls a values().batchGet() on the service to find the list of column names from each sheet in
# sheet_names. Returns a dictionary with one list per sheet_name.
def get_headers(service, spreadsheet_id, sheet_names):
header_ranges = [sheet_name + '!A1:Z' for sheet_name in sheet_names]
LOGGER.debug("Called [spreadsheets.values().batchGet(spreadsheetId='" + spreadsheet_id +
', ranges=' + str(header_ranges) + "')]")
request = service.values().batchGet(spreadsheetId=spreadsheet_id, ranges=header_ranges)
response = request.execute()
headers = {}
for k, sheet_name in enumerate(sheet_names):
if 'values' in response['valueRanges'][k]:
# Headers are in the first row of values
headers[sheet_name] = response['valueRanges'][k]['values'][0]
else:
headers[sheet_name] = []
return headers
# Calls values().batchUpdate() with supplied list of data <object(ValueRange)> to update on the
# service.
def batch_update_values(service, spreadsheet_id, data):
batch_update_values_request_body = {
'valueInputOption': 'USER_ENTERED', # Helps with formatting of dates
'data': data,
}
LOGGER.debug("Called [spreadsheets.values().batchUpdate(spreadsheetId='" + spreadsheet_id +
"', body=" + str(batch_update_values_request_body) + ')]')
request = service.values().batchUpdate(
spreadsheetId=spreadsheet_id, body=batch_update_values_request_body)
request.execute()
# Get the sheetId of a sheet based on its name
def get_sheet_id(spreadsheet, sheet_name):
for sheet in spreadsheet['sheets']:
if sheet['properties']['title'] == sheet_name:
return sheet['properties']['sheetId']
return -1
# Update the filters on sheets with a 'duplicate' column. Filter out any duplicate rows
def update_filters(service, spreadsheet_id, headers, info, spreadsheet):
updates = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
duplicate_found = 'duplicate' in headers[sheet_name]
if duplicate_found:
sheet_id = get_sheet_id(spreadsheet, sheet_name)
if sheet_id > -1:
updates.append({
"setBasicFilter": {
"filter": {
"range": {
"sheetId": sheet_id,
"startColumnIndex": 0,
"endColumnIndex": len(headers[sheet_name])
},
"sortSpecs": [{
"dimensionIndex": headers[sheet_name].index('date'),
"sortOrder": "ASCENDING"
}],
"criteria": {
str(headers[sheet_name].index('duplicate')): {
"hiddenValues":
["1"] # Hide rows when duplicate is 1 (true)
}
}
}
}
})
if updates:
LOGGER.info('Updating sheet filters...')
batch_update(service, spreadsheet_id, updates)
# Populates the headers with any missing/desired rows based on the info struct, and calls
# batch update to update the corresponding sheets if necessary.
def update_headers(service, spreadsheet_id, headers, info):
data = []
sheet_names = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
            if step_name not in info[bot_name]:
LOGGER.error("Missing info for step name: '" + step_name + "'")
sheet_name = format_sheet_name(bot_name, step_name)
headers_stale = False
# Headers should always contain the following columns
for req in REQUIRED_COLUMNS:
if req not in headers[sheet_name]:
headers_stale = True
headers[sheet_name].append(req)
# Headers also must contain all the keys seen in this step
for key in info[bot_name][step_name].keys():
if key not in headers[sheet_name]:
headers_stale = True
headers[sheet_name].append(key)
# Update the Gdoc headers if necessary
if headers_stale:
sheet_names.append(sheet_name)
header_range = sheet_name + '!A1:Z'
data.append({
'range': header_range,
'majorDimension': 'ROWS',
'values': [headers[sheet_name]]
})
if data:
LOGGER.info('Updating sheet headers...')
batch_update_values(service, spreadsheet_id, data)
# Calls values().append() to append a list of values to a given sheet.
def append_values(service, spreadsheet_id, sheet_name, values):
header_range = sheet_name + '!A1:Z'
insert_data_option = 'INSERT_ROWS'
value_input_option = 'USER_ENTERED' # Helps with formatting of dates
append_values_request_body = {
'range': header_range,
'majorDimension': 'ROWS',
'values': [values],
}
LOGGER.debug("Called [spreadsheets.values().append(spreadsheetId='" + spreadsheet_id +
"', body=" + str(append_values_request_body) + ", range='" + header_range +
"', insertDataOption='" + insert_data_option + "', valueInputOption='" +
value_input_option + "')]")
request = service.values().append(
spreadsheetId=spreadsheet_id,
body=append_values_request_body,
range=header_range,
insertDataOption=insert_data_option,
valueInputOption=value_input_option)
request.execute()
# Formula to determine whether a row is a duplicate of the previous row based on checking the
# columns listed in filter_columns.
# Eg.
# date | pass | fail
# Jan 1 100 50
# Jan 2 100 50
# Jan 3 99 51
#
# If we want to filter based on only the "pass" and "fail" columns, we generate a formula
# like 'IF(B2=B1, IF(C2=C1,1,0), 0)' in the 'duplicate' column.
# This formula is recursively generated for each column in filter_columns, using the column
# position as determined by headers. The formula uses a more generalized form with
# 'INDIRECT(ADDRESS(<row>, <col>))'' instead of 'B1', where <row> is Row() and Row()-1, and col is
# determined by the column's position in headers
def generate_duplicate_formula(headers, filter_columns):
# No more columns, put a 1 in the IF statement true branch
if len(filter_columns) == 0:
return '1'
# Next column is found, generate the formula for duplicate checking, and remove from the list
# for recursion
for i in range(len(headers)):
if headers[i] == filter_columns[0]:
col = str(i + 1)
formula = "IF(INDIRECT(ADDRESS(ROW(), " + col + "))=INDIRECT(ADDRESS(ROW() - 1, " + \
col + "))," + generate_duplicate_formula(headers, filter_columns[1:]) + ",0)"
return formula
# Next column not found, remove from recursion but just return whatever the next one is
return generate_duplicate_formula(headers, filter_columns[1:])
# Helper function to start the recursive call to generate_duplicate_formula
def generate_duplicate_formula_helper(headers):
filter_columns = MAIN_RESULT_COLUMNS
formula = generate_duplicate_formula(headers, filter_columns)
if (formula == "1"):
return ""
else:
# Final result needs to be prepended with =
return "=" + formula
# Uses the list of headers and the info struct to come up with a list of values for each step
# from the latest builds.
def update_values(service, spreadsheet_id, headers, info):
data = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
values = []
# For each key in the list of headers, either add the corresponding value or add a blank
# value. It's necessary for the values to match the order of the headers
for key in headers[sheet_name]:
if key in info[bot_name] and key in REQUIRED_COLUMNS:
values.append(info[bot_name][key])
elif key in info[bot_name][step_name]:
values.append(info[bot_name][step_name][key])
elif key == "duplicate" and key in REQUIRED_COLUMNS:
values.append(generate_duplicate_formula_helper(headers[sheet_name]))
else:
values.append('')
LOGGER.info("Appending new rows to sheet '" + sheet_name + "'...")
try:
append_values(service, spreadsheet_id, sheet_name, values)
except Exception as error:
LOGGER.warning('%s\n' % str(error))
# Updates the given spreadsheed_id with the info struct passed in.
def update_spreadsheet(service, spreadsheet_id, info):
LOGGER.info('Opening spreadsheet...')
spreadsheet = get_spreadsheet(service, spreadsheet_id)
LOGGER.info('Parsing sheet names...')
sheet_names = get_sheet_names(info)
new_sheets = validate_sheets(spreadsheet, sheet_names)
if new_sheets:
LOGGER.info('Creating new sheets...')
create_sheets(service, spreadsheet_id, new_sheets)
LOGGER.info('Parsing sheet headers...')
headers = get_headers(service, spreadsheet_id, sheet_names)
update_headers(service, spreadsheet_id, headers, info)
update_filters(service, spreadsheet_id, headers, info, spreadsheet)
update_values(service, spreadsheet_id, headers, info)
#####################
# Main/helpers #
#####################
# Loads or creates credentials and connects to the Sheets API. Returns a Spreadsheets object with
# an open connection.
def get_sheets_service(auth_path):
credentials_path = auth_path + '/credentials.json'
token_path = auth_path + '/token.pickle'
creds = None
if not os.path.exists(auth_path):
LOGGER.info("Creating auth dir '" + auth_path + "'")
os.makedirs(auth_path)
if not os.path.exists(credentials_path):
raise Exception('Missing credentials.json.\n'
'Go to: https://developers.google.com/sheets/api/quickstart/python\n'
"Under Step 1, click 'ENABLE THE GOOGLE SHEETS API'\n"
"Click 'DOWNLOAD CLIENT CONFIGURATION'\n"
'Save to your auth_path (' + auth_path + ') as credentials.json')
if os.path.exists(token_path):
with open(token_path, 'rb') as token:
creds = pickle.load(token)
LOGGER.info('Loaded credentials from ' + token_path)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
LOGGER.info('Refreshing credentials...')
creds.refresh(Request())
else:
LOGGER.info('Could not find credentials. Requesting new credentials.')
flow = InstalledAppFlow.from_client_secrets_file(credentials_path, SCOPES)
creds = flow.run_local_server()
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheets = service.spreadsheets()
return sheets
# Parse the input to the script
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(sys.argv[0]))
parser.add_argument(
'--auth_path',
default=HOME_DIR + '/.auth',
nargs='?',
help='path to directory containing authorization data '
'(credentials.json and token.pickle). '
'[default=<home>/.auth]')
parser.add_argument(
'--spreadsheet',
default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek',
nargs='?',
help='ID of the spreadsheet to write stats to. '
"[default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek']")
parser.add_argument(
'--verbosity',
default='INFO',
nargs='?',
help='Verbosity of output. Valid options are '
'[DEBUG, INFO, WARNING, ERROR]. '
'[default=INFO]')
return parser.parse_args()
# Set up the logging with the right verbosity and output.
def initialize_logging(verbosity):
handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(levelname)s: %(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
if 'DEBUG' in verbosity:
LOGGER.setLevel(level=logging.DEBUG)
elif 'INFO' in verbosity:
LOGGER.setLevel(level=logging.INFO)
elif 'WARNING' in verbosity:
LOGGER.setLevel(level=logging.WARNING)
elif 'ERROR' in verbosity:
LOGGER.setLevel(level=logging.ERROR)
else:
LOGGER.setLevel(level=logging.INFO)
def main():
os.chdir(ROOT_DIR)
args = parse_args()
verbosity = args.verbosity.strip().upper()
initialize_logging(verbosity)
auth_path = args.auth_path.replace('\\', '/')
try:
service = get_sheets_service(auth_path)
except Exception as error:
LOGGER.error('%s\n' % str(error))
exit(1)
info = {}
LOGGER.info('Building info struct...')
for bot_name in BOT_NAMES:
LOGGER.info("Parsing bot '" + bot_name + "'...")
try:
info[bot_name] = get_bot_info(BOT_NAME_PREFIX + bot_name)
except Exception as error:
LOGGER.error('%s\n' % str(error))
LOGGER.info('Updating sheets...')
try:
update_spreadsheet(service, args.spreadsheet, info)
except Exception as error:
LOGGER.error('%s\n' % str(error))
        exit(1)
LOGGER.info('Info was successfully parsed to sheet: https://docs.google.com/spreadsheets/d/' +
args.spreadsheet)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 4,829,267,775,993,492,000 | 40.213936 | 391 | 0.605671 | false |
datalyze-solutions/pandas-qt | pandasqt/views/CSVDialogs.py | 1 | 23796 | # -*- coding: utf-8 -*-
import os
from encodings.aliases import aliases as _encodings
import pandas
from pandasqt.compat import Qt, QtCore, QtGui, Slot, Signal
from pandasqt.encoding import Detector
from pandasqt.models.DataFrameModel import DataFrameModel
from pandasqt.views.CustomDelegates import DtypeComboDelegate
from pandasqt.views._ui import icons_rc
from pandasqt.utils import fillNoneValues, convertTimestamps
class DelimiterValidator(QtGui.QRegExpValidator):
"""A Custom RegEx Validator.
    The validator checks that the input has a length of 1.
The input may contain any non-whitespace-character
as denoted by the RegEx term `\S`.
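
    Example:
        A minimal sketch attaching the validator to a line edit (the
        surrounding widget setup is assumed)::

            lineEdit = QtGui.QLineEdit()
            lineEdit.setValidator(DelimiterValidator(lineEdit))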
"""
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterValidator, self).__init__(parent)
        re = QtCore.QRegExp(r'\S{1}')
self.setRegExp(re)
class DelimiterSelectionWidget(QtGui.QGroupBox):
"""A custom widget with different text delimiter signs.
A user can choose between 3 predefined and one user defined
    text delimiter characters. The predefined delimiters are `semicolon`,
    `comma` and `tabulator`. The user defined delimiter may only have
a length of 1 and may not include any whitespace character.
Attributes:
        delimiter (QtCore.pyqtSignal): This signal is emitted whenever a
delimiter character is selected by the user.
semicolonRadioButton (QtGui.QRadioButton): A radio button to
select the `semicolon` character as delimiter.
commaRadioButton (QtGui.QRadioButton): A radio button to select
the `comma` character as delimiter.
tabRadioButton (QtGui.QRadioButton): A radio button to select
the `tabulator` character as delimiter.
otherRadioButton (QtGui.QRadioButton): A radio button to select
the given input text as delimiter.
otherSeparatorLineEdit (QtGui.QLineEdit): An input line to let the
user enter one character only, which may be used as delimiter.
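
    Example:
        A minimal usage sketch (``onDelimiterChanged`` is a hypothetical
        slot, not part of this module)::

            box = DelimiterSelectionWidget()
            box.delimiter.connect(onDelimiterChanged)
            current = box.currentSelected()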
"""
delimiter = Signal('QString')
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterSelectionWidget, self).__init__(parent)
self.semicolonRadioButton = None
self.commaRadioButton = None
self.tabRadioButton = None
self.otherRadioButton = None
self.otherSeparatorLineEdit = None
self._initUI()
def _initUI(self):
"""Creates the inital layout with all subwidgets.
The layout is a `QHBoxLayout`. Each time a radio button is
selected or unselected, a slot
`DelimiterSelectionWidget._delimiter` is called.
Furthermore the `QLineEdit` widget has a custom regex validator
`DelimiterValidator` enabled.
"""
self.semicolonRadioButton = QtGui.QRadioButton(u'Semicolon')
self.commaRadioButton = QtGui.QRadioButton(u'Comma')
self.tabRadioButton = QtGui.QRadioButton(u'Tab')
self.otherRadioButton = QtGui.QRadioButton(u'Other')
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit = QtGui.QLineEdit(self)
self.otherSeparatorLineEdit.setEnabled(False)
self.semicolonRadioButton.toggled.connect(self._delimiter)
self.commaRadioButton.toggled.connect(self._delimiter)
self.tabRadioButton.toggled.connect(self._delimiter)
self.otherRadioButton.toggled.connect(self._enableLine)
self.otherSeparatorLineEdit.textChanged.connect(lambda: self._delimiter(True))
self.otherSeparatorLineEdit.setValidator(DelimiterValidator(self))
        # detach any existing layout so a new one can be set; deleting only
        # the local reference would not remove the layout from this widget,
        # so the old layout is reparented to a throwaway widget instead
        currentLayout = self.layout()
        if currentLayout is not None:
            QtGui.QWidget().setLayout(currentLayout)
layout = QtGui.QHBoxLayout()
layout.addWidget(self.semicolonRadioButton)
layout.addWidget(self.commaRadioButton)
layout.addWidget(self.tabRadioButton)
layout.addWidget(self.otherRadioButton)
layout.addWidget(self.otherSeparatorLineEdit)
self.setLayout(layout)
@Slot('QBool')
def _enableLine(self, toggled):
self.otherSeparatorLineEdit.setEnabled(toggled)
def currentSelected(self):
"""Returns the currently selected delimiter character.
Returns:
str: One of `,`, `;`, `\t`, `*other*`.
"""
if self.commaRadioButton.isChecked():
return ','
elif self.semicolonRadioButton.isChecked():
return ';'
elif self.tabRadioButton.isChecked():
return '\t'
elif self.otherRadioButton.isChecked():
return self.otherSeparatorLineEdit.text()
return
@Slot('QBool')
def _delimiter(self, checked):
if checked:
if self.commaRadioButton.isChecked():
self.delimiter.emit(',')
elif self.semicolonRadioButton.isChecked():
self.delimiter.emit(';')
elif self.tabRadioButton.isChecked():
self.delimiter.emit('\t')
elif self.otherRadioButton.isChecked():
ret = self.otherSeparatorLineEdit.text()
if len(ret) > 0:
self.delimiter.emit(ret)
def reset(self):
"""Resets this widget to its initial state.
"""
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit.setText('')
class CSVImportDialog(QtGui.QDialog):
"""A dialog to import any csv file into a pandas data frame.
This modal dialog enables the user to enter any path to a csv
file and parse this file with or without a header and with special
delimiter characters.
On a successful load, the data can be previewed and the column data
types may be edited by the user.
After all configuration is done, the dataframe and the underlying model
may be used by the main application.
Attributes:
        load (QtCore.pyqtSignal): This signal is emitted whenever the
dialog is successfully closed, e.g. when the ok button is
pressed. Returns DataFrameModel and path of chosen csv file.
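
    Example:
        A minimal usage sketch (``mainWindow`` and the ``handleLoad`` slot
        are assumed, not part of this module)::

            dialog = CSVImportDialog(parent=mainWindow)
            dialog.load.connect(handleLoad)  # handleLoad(model, filename)
            dialog.show()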
"""
load = Signal('QAbstractItemModel', str)
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
parent (QObject, optional): Causes the objected to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(CSVImportDialog, self).__init__(parent)
self._modal = True
self._windowTitle = u'Import CSV'
self._encodingKey = None
self._filename = None
self._delimiter = None
self._header = None
self._detector = Detector()
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Choose File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
self._filenameLineEdit.textEdited.connect(self._updateFilename)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-open.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._openFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
self._delimiter = self._delimiterBox.currentSelected()
self._delimiterBox.delimiter.connect(self._updateDelimiter)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._tabWidget = QtGui.QTabWidget(self)
self._previewTableView = QtGui.QTableView(self)
self._datatypeTableView = QtGui.QTableView(self)
self._tabWidget.addTab(self._previewTableView, u'Preview')
self._tabWidget.addTab(self._datatypeTableView, u'Change Column Types')
layout.addWidget(self._tabWidget, 4, 0, 3, 4)
self._datatypeTableView.horizontalHeader().setDefaultSectionSize(200)
self._datatypeTableView.setItemDelegateForColumn(1, DtypeComboDelegate(self._datatypeTableView))
self._loadButton = QtGui.QPushButton(u'Load Data', self)
#self.loadButton.setAutoDefault(False)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
# self.cancelButton.setDefault(False)
# self.cancelButton.setAutoDefault(True)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._loadButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 9, 2, 1, 2)
self._loadButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 8, 0, 1, 4)
self.setLayout(layout)
@Slot('QString')
def updateStatusBar(self, message):
"""Updates the status bar widget of this dialog with the given message.
This method is also a `SLOT()`.
The message will be shown for only 5 seconds.
Args:
message (QString): The new message which will be displayed.
"""
self._statusBar.showMessage(message, 5000)
@Slot()
def _openFile(self):
"""Opens a file dialog and sets a value for the QLineEdit widget.
This method is also a `SLOT`.
"""
ret = QtGui.QFileDialog.getOpenFileName(self, self.tr(u'open file'), filter='Comma Separated Values (*.csv)')
if ret:
self._filenameLineEdit.setText(ret)
self._updateFilename()
@Slot('QBool')
def _updateHeader(self, toggled):
"""Changes the internal flag, whether the csv file contains a header or not.
This method is also a `SLOT`.
In addition, after toggling the corresponding checkbox, the
`_previewFile` method will be called.
Args:
toggled (boolean): A flag indicating the status of the checkbox.
The flag will be used to update an internal variable.
"""
self._header = 0 if toggled else None
self._previewFile()
@Slot()
def _updateFilename(self):
"""Calls several methods after the filename changed.
This method is also a `SLOT`.
It checks the encoding of the changed filename and generates a
preview of the data.
"""
self._filename = self._filenameLineEdit.text()
self._guessEncoding(self._filename)
self._previewFile()
def _guessEncoding(self, path):
"""Opens a file from the given `path` and checks the file encoding.
        The file must exist on the file system and end with the extension
        `.csv`. The file is read line by line until the encoding can be
        guessed.
        On a successful identification, the widgets of this dialog will be
updated.
Args:
path (string): Path to a csv file on the file system.
"""
if os.path.exists(path) and path.lower().endswith('csv'):
encoding = self._detector.detect(path)
if encoding is not None:
if encoding.startswith('utf'):
encoding = encoding.replace('-', '')
encoding = encoding.replace('-','_')
viewValue = _encodings.get(encoding)
self._encodingKey = encoding
index = self._encodingComboBox.findText(viewValue.upper())
self._encodingComboBox.setCurrentIndex(index)
@Slot('int')
def _updateEncoding(self, index):
"""Changes the value of the encoding combo box to the value of given index.
This method is also a `SLOT`.
After the encoding is changed, the file will be reloaded and previewed.
Args:
            index (int): A valid index of the combo box.
"""
encoding = self._encodingComboBox.itemText(index)
encoding = encoding.lower()
self._encodingKey = _calculateEncodingKey(encoding)
self._previewFile()
@Slot('QString')
def _updateDelimiter(self, delimiter):
"""Changes the value of the delimiter for the csv file.
This method is also a `SLOT`.
Args:
delimiter (string): The new delimiter.
"""
self._delimiter = delimiter
self._previewFile()
def _previewFile(self):
"""Updates the preview widgets with new models for both tab panes.
"""
dataFrame = self._loadCSVDataFrame()
dataFrameModel = DataFrameModel(dataFrame)
dataFrameModel.enableEditing(True)
self._previewTableView.setModel(dataFrameModel)
columnModel = dataFrameModel.columnDtypeModel()
columnModel.changeFailed.connect(self.updateStatusBar)
self._datatypeTableView.setModel(columnModel)
def _loadCSVDataFrame(self):
"""Loads the given csv file with pandas and generate a new dataframe.
The file will be loaded with the configured encoding, delimiter
and header.
If any exceptions occur, an empty `DataFrame` is generated
and a message will appear in the status bar.
Returns:
pandas.DataFrame: A dataframe containing all the available
information of the csv file.
"""
if self._filename and os.path.exists(self._filename) and self._filename.endswith('.csv'):
# default fallback if no encoding was found/selected
encoding = self._encodingKey or 'utf8'
try:
dataFrame = pandas.read_csv(self._filename,
sep=self._delimiter, encoding=encoding,
header=self._header)
dataFrame = dataFrame.apply(fillNoneValues)
dataFrame = dataFrame.apply(convertTimestamps)
except Exception, err:
self.updateStatusBar(str(err))
return pandas.DataFrame()
self.updateStatusBar('Preview generated.')
return dataFrame
self.updateStatusBar('File does not exist or does not end with .csv')
return pandas.DataFrame()
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(0)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
self._previewTableView.setModel(None)
self._datatypeTableView.setModel(None)
@Slot()
def accepted(self):
"""Successfully close the widget and return the loaded model.
This method is also a `SLOT`.
The dialog will be closed when the `ok` button is pressed. If
a `DataFrame` was loaded, it will be emitted by the signal `load`.
"""
model = self._previewTableView.model()
if model is not None:
df = model.dataFrame().copy()
dfModel = DataFrameModel(df)
self.load.emit(dfModel, self._filename)
self._resetWidgets()
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.reject()
class CSVExportDialog(QtGui.QDialog):
"""An widget to serialize a `DataFrameModel` to a `CSV-File`.
"""
exported = Signal('QBool')
def __init__(self, model=None, parent=None):
super(CSVExportDialog, self).__init__(parent)
self._model = model
self._modal = True
self._windowTitle = u'Export to CSV'
self._idx = -1
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Output File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-save-as.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._createFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._idx = encoding_names.index('UTF_8')
self._encodingComboBox.setCurrentIndex(self._idx)
#self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
#self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._exportButton = QtGui.QPushButton(u'Export Data', self)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._exportButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 5, 2, 1, 2)
self._exportButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 4, 0, 1, 4)
self.setLayout(layout)
def setExportModel(self, model):
    """Sets the model which shall be serialized to a csv file.

    Args:
        model (DataFrameModel): The model to export.

    Returns:
        bool: True on success, False if `model` is not a `DataFrameModel`.
    """
    if not isinstance(model, DataFrameModel):
return False
self._model = model
return True
@Slot()
def _createFile(self):
    """Opens a save file dialog and sets the value of the filename line edit.

    This method is also a `SLOT`.
    """
    ret = QtGui.QFileDialog.getSaveFileName(self, 'Save File', filter='Comma Separated Value (*.csv)')
    if ret:
        self._filenameLineEdit.setText(ret)
def _saveModel(self):
    """Serializes the currently set model to a csv file.

    The output is written with the user selected delimiter, header,
    encoding and filename. Errors during serialization are re-raised
    with a descriptive message.
    """
delimiter = self._delimiterBox.currentSelected()
header = self._headerCheckBox.isChecked() # column labels
filename = self._filenameLineEdit.text()
index = False # row labels
encodingIndex = self._encodingComboBox.currentIndex()
encoding = self._encodingComboBox.itemText(encodingIndex)
encoding = _calculateEncodingKey(encoding.lower())
try:
dataFrame = self._model.dataFrame()
except AttributeError, err:
raise AttributeError('No data loaded to export.')
else:
try:
dataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)
except IOError, err:
raise IOError('No filename given')
except UnicodeError, err:
raise UnicodeError('Could not encode all data. Choose a different encoding')
except Exception:
raise
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(self._idx)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
@Slot()
def accepted(self):
"""Successfully close the widget and emit an export signal.
This method is also a `SLOT`.
The dialog will be closed when the `Export Data` button is
pressed. If errors occur during the export, the status bar
will show the error message and the dialog will not be closed.
"""
try:
self._saveModel()
except Exception, err:
self._statusBar.showMessage(str(err))
else:
self._resetWidgets()
self.exported.emit(True)
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.exported.emit(False)
self.reject()
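
# Illustrative usage sketch for `CSVExportDialog` (an addition to this file,
# not part of the original API). `frameModel` is a hypothetical, already
# populated `DataFrameModel`; the `exported` signal reports success as a
# boolean:
#
#     exportDialog = CSVExportDialog(model=frameModel)
#     exportDialog.exported.connect(
#         lambda success: None)  # replace with a real handler
#     exportDialog.show()
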
def _calculateEncodingKey(comparator):
"""Gets the first key of all available encodings where the corresponding
value matches the comparator.
Args:
comparator (string): A view name for an encoding.
Returns:
str: A key for a specific encoding used by python.
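
Example (hypothetical, assuming `_encodings` maps `'utf_8'` to `'utf8'`):
    >>> _calculateEncodingKey('utf8')
    'utf_8'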
"""
encodingName = None
for k, v in _encodings.viewitems():
if v == comparator:
encodingName = k
break
return encodingName
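

if __name__ == '__main__':
    # Minimal standalone demo (an addition to this file, not part of the
    # original module). It assumes the Qt bindings imported at the top of the
    # module expose `QtGui.QApplication` (true for the Qt4-style API used
    # throughout) and that `CSVImportDialog` can be built without arguments.
    import sys

    app = QtGui.QApplication(sys.argv)
    demoDialog = CSVImportDialog()
    demoDialog.load.connect(
        lambda model, filename: sys.stdout.write('loaded %s\n' % filename))
    demoDialog.show()
    sys.exit(app.exec_())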