repo_name | path | copies | size | content | license
---|---|---|---|---|---|
fhe-odoo/odoo | addons/purchase_requisition/wizard/bid_line_qty.py | 374 | 1711 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
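# --- Usage sketch (not part of the original module) --------------------------
# A hedged illustration of how this transient wizard is typically driven from
# server-side code under the old OpenERP 7 osv API. ``pool``, ``cr`` and ``uid``
# are assumed to come from a running server environment, and the helper name
# and ids below are purely illustrative.
def _example_change_bid_qty(pool, cr, uid, purchase_order_line_ids, new_qty):
    # Create a transient wizard record holding the new quantity ...
    wizard_obj = pool.get('bid.line.qty')
    wizard_id = wizard_obj.create(cr, uid, {'qty': new_qty})
    # ... then run it against the selected purchase order lines, which
    # change_qty() reads from context['active_ids'].
    return wizard_obj.change_qty(cr, uid, [wizard_id],
                                 context={'active_ids': purchase_order_line_ids})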
| agpl-3.0 |
aronysidoro/django-angular | djangular/views/mixins.py | 16 | 5168 | # -*- coding: utf-8 -*-
import json
import warnings
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
def allow_remote_invocation(func, method='auto'):
"""
All methods which shall be callable through a given Ajax 'action' must be
decorated with ``@allow_remote_invocation``. This is required for safety
reasons: it prevents the caller from invoking arbitrary methods of a class.
"""
setattr(func, 'allow_rmi', method)
return func
def allowed_action(func):
warnings.warn("Decorator `@allowed_action` is deprecated. Use `@allow_remote_invocation` instead.", DeprecationWarning)
return allow_remote_invocation(func)
class JSONResponseException(Exception):
"""
Exception class for triggering HTTP 4XX responses with JSON content, where expected.
"""
status_code = 400
def __init__(self, message=None, status=None, *args, **kwargs):
if status is not None:
self.status_code = status
super(JSONResponseException, self).__init__(message, *args, **kwargs)
class JSONBaseMixin(object):
"""
Basic mixin for encoding HTTP responses in JSON format.
"""
json_encoder = DjangoJSONEncoder
json_content_type = 'application/json;charset=UTF-8'
def json_response(self, response_data, status=200, **kwargs):
out_data = json.dumps(response_data, cls=self.json_encoder, **kwargs)
response = HttpResponse(out_data, self.json_content_type, status=status)
response['Cache-Control'] = 'no-cache'
return response
class JSONResponseMixin(JSONBaseMixin):
"""
A mixin for View classes that dispatches requests containing the private HTTP header
``DjNg-Remote-Method`` onto a method of an instance of this class, with the given method name.
This named method must be decorated with ``@allow_remote_invocation`` and shall return a
list or dictionary which is serializable to JSON.
The returned HTTP responses are of kind ``application/json;charset=UTF-8``.
"""
def get(self, request, *args, **kwargs):
if not request.is_ajax():
return self._dispatch_super(request, *args, **kwargs)
if 'action' in kwargs:
warnings.warn("Using the keyword 'action' in URLresolvers is deprecated. Please use 'invoke_method' instead", DeprecationWarning)
remote_method = kwargs['action']
else:
remote_method = kwargs.get('invoke_method')
if remote_method:
# method for invocation is determined programmatically
handler = getattr(self, remote_method)
else:
# method for invocation is determined by HTTP header
remote_method = request.META.get('HTTP_DJNG_REMOTE_METHOD')
handler = remote_method and getattr(self, remote_method, None)
if not callable(handler):
return self._dispatch_super(request, *args, **kwargs)
if not hasattr(handler, 'allow_rmi'):
return HttpResponseForbidden("Method '{0}.{1}' has no decorator '@allow_remote_invocation'"
.format(self.__class__.__name__, remote_method))
try:
response_data = handler()
except JSONResponseException as e:
return self.json_response({'message': e.args[0]}, e.status_code)
return self.json_response(response_data)
def post(self, request, *args, **kwargs):
if not request.is_ajax():
return self._dispatch_super(request, *args, **kwargs)
try:
in_data = json.loads(request.body.decode('utf-8'))
except ValueError:
in_data = request.body.decode('utf-8')
if 'action' in in_data:
warnings.warn("Using the keyword 'action' inside the payload is deprecated. Please use 'djangoRMI' from module 'ng.django.forms'", DeprecationWarning)
remote_method = in_data.pop('action')
else:
remote_method = request.META.get('HTTP_DJNG_REMOTE_METHOD')
handler = remote_method and getattr(self, remote_method, None)
if not callable(handler):
return self._dispatch_super(request, *args, **kwargs)
if not hasattr(handler, 'allow_rmi'):
return HttpResponseForbidden("Method '{0}.{1}' has no decorator '@allow_remote_invocation'"
.format(self.__class__.__name__, remote_method))
try:
response_data = handler(in_data)
except JSONResponseException as e:
return self.json_response({'message': e.args[0]}, e.status_code)
return self.json_response(response_data)
def _dispatch_super(self, request, *args, **kwargs):
base = super(JSONResponseMixin, self)
handler = getattr(base, request.method.lower(), None)
if callable(handler):
return handler(request, *args, **kwargs)
# HttpResponseNotAllowed expects the list of permitted methods, which is not
# known here, so fall back to a generic response with status 405 instead.
return HttpResponseBadRequest('This view cannot handle method {0}'.format(request.method), status=405)
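# --- Usage sketch (not part of the original module) --------------------------
# A minimal example of how a project-level view could combine this mixin with a
# generic Django View. The class name, method name and payload fields are
# illustrative assumptions; the client is expected to send an Ajax POST whose
# ``DjNg-Remote-Method`` header names the decorated method.
from django.views.generic import View

class ExampleProductView(JSONResponseMixin, View):
    @allow_remote_invocation
    def fetch_prices(self, in_data):
        # ``in_data`` is the deserialized JSON body sent by the client.
        currency = in_data.get('currency', 'EUR')
        if currency not in ('EUR', 'USD'):
            # Raising JSONResponseException turns into an HTTP 4XX JSON reply.
            raise JSONResponseException('Unsupported currency', status=400)
        return {'currency': currency, 'prices': [1.99, 2.49]}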
| mit |
Eddy0402/Environment | vim/ycmd/cpp/ycm/tests/gmock/test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
"""Runs the unit test."""
gtest_test_utils.Main()
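# --- Usage sketch (not part of the original file) ----------------------------
# A small, hedged illustration of GetExitStatus(): it decodes the raw return
# value of os.system() portably on Windows and Unix. The shell command below is
# an arbitrary example, and the block only runs when this file is executed
# directly.
if __name__ == '__main__':
    raw = os.system('exit 7')
    print('decoded exit status: %d' % GetExitStatus(raw))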
| gpl-3.0 |
quanhua92/learning-notes | libs/wxpython/design/mvc.py | 1 | 3804 | import wx
class ModelInterface(object):
""""Defines an interface for a simple value generator model"""
def __init__(self):
super(ModelInterface, self).__init__()
self.value = 0
self.observers = list()
def Generate(self):
"""Interface method to be implemented by subclasses"""
raise NotImplementedError
def SetValue(self, value):
self.value = value
self.NotifyObservers()
def GetValue(self):
return self.value
def RegisterObserver(self, callback):
"""Register an observer callback
:param callable(newvalue)
"""
self.observers.append(callback)
def NotifyObservers(self):
"""Notify all observers of current value"""
for observer in self.observers:
observer()
class ControllerInterface(object):
"""Defines an interface a value generator controller"""
def __init__(self, model):
super(ControllerInterface, self).__init__()
# Attributes
self.model = model
self.view = TheView(None, self, self.model, "Fibonacci Generator")
# Setup
self.view.Show()
def DoGenerateNext(self):
"""User action request next value"""
raise NotImplementedError
class FibonacciModel(ModelInterface):
def Generate(self):
cval = self.GetValue()
# Get the next one
for fib in self.fibonacci():
if fib > cval:
self.SetValue(fib)
break
@staticmethod
def fibonacci():
"""FIbonacci generator method"""
a, b = 0, 1
while True:
yield a
a, b = b, a + b
class FibonacciController(ControllerInterface):
def DoGenerateNext(self):
self.view.EnableButton(False)
self.model.Generate()
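# --- Illustrative extension (not part of the original example) ---------------
# The ModelInterface/ControllerInterface pair above is generic: any value
# generator can reuse the same observer plumbing and view. A minimal sketch of
# an alternative model/controller pair; the class names are assumptions, and
# the window title set in ControllerInterface still reads "Fibonacci Generator".
class SquaresModel(ModelInterface):
    def Generate(self):
        """Advance to the next perfect square greater than the current value"""
        current = self.GetValue()
        root = 0
        while root * root <= current:
            root += 1
        self.SetValue(root * root)

class SquaresController(ControllerInterface):
    def DoGenerateNext(self):
        self.view.EnableButton(False)
        self.model.Generate()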
class TheView(wx.Frame):
def __init__(self, parent, controller, model, title, *args, **kwargs):
super(TheView, self).__init__(parent, title=title, *args, **kwargs)
# Attributes
self.panel = ViewPanel(self, controller, model)
# Layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetInitialSize((300, 300))
def EnableButton(self, enable=True):
self.panel.button.Enable(enable)
class ViewPanel(wx.Panel):
def __init__(self, parent, controller, model):
super(ViewPanel, self).__init__(parent)
# Attributes
self.model = model
self.controller = controller
initial = str(self.model.GetValue())
self.text = wx.TextCtrl(self, value=initial)
self.button = wx.Button(self, label="Generate")
# Layout
self.__DoLayout()
# Setup
self.model.RegisterObserver(self.OnModelUpdate)
# Event Handlers
self.Bind(wx.EVT_BUTTON, self.OnAction)
def __DoLayout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
vsizer.AddStretchSpacer()
vsizer.Add(self.text, 0, wx.ALIGN_CENTER|wx.ALL, 8)
hsizer.AddStretchSpacer()
hsizer.Add(self.button)
hsizer.AddStretchSpacer()
vsizer.Add(hsizer, 0, wx.EXPAND)
vsizer.AddStretchSpacer()
self.SetSizer(vsizer)
def OnModelUpdate(self):
"""Observer Method"""
value = self.model.GetValue()
self.text.SetValue(str(value))
self.button.Enable(True)
def OnAction(self, event):
self.controller.DoGenerateNext()
class ModelViewApp(wx.App):
def OnInit(self):
self.model = FibonacciModel()
self.controller = FibonacciController(self.model)
return True
if __name__ == "__main__":
app = ModelViewApp(False)
app.MainLoop()
| apache-2.0 |
richardotis/scipy | scipy/odr/models.py | 113 | 4659 | """ Collection of Model instances for use with the odrpack fitting package.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.odr.odrpack import Model
__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
'polynomial']
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float), np.power(x,
powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1),axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
multilinear = Model(_lin_fcn, fjacb=_lin_fjb,
fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'})
def polynomial(order):
"""
Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
polynomial : Model instance
Model instance.
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ':'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^{%s} \\beta_i x^i$' %
(len_beta-1)})
exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est, meta={'name':'Exponential',
'equ':'y= B_0 + exp(B_1 * x)',
'TeXequ':'$y=\\beta_0 + e^{\\beta_1 x}$'})
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est, meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
estimate=_quad_est, meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
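# --- Usage sketch (not part of the original module) ---------------------------
# A hedged demo of fitting one of the models above with the public scipy.odr
# API (Data, ODR). Because each model carries an ``estimate`` callable, no
# explicit beta0 is needed. The sample points and noise level are arbitrary.
if __name__ == '__main__':
    from scipy.odr import Data, ODR

    x = np.linspace(0.0, 5.0, 20)
    y = 2.0 * x**2 - 3.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.shape)
    fit = ODR(Data(x, y), quadratic).run()
    print(fit.beta)  # should be close to [2., -3., 1.]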
| bsd-3-clause |
SOKP/external_chromium_org | tools/telemetry/telemetry/value/summary.py | 58 | 6381 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from telemetry.value import failure
from telemetry.value import merge_values
from telemetry.value import skip
class Summary(object):
"""Computes summary values from the per-page-run values produced by a test.
Some telemetry benchmarks repeat a number of times in order to get a reliable
measurement. The test does not have to handle merging of these runs: the
summarizer does it for you.
For instance, if two pages run, 3 times and once respectively:
ScalarValue(page1, 'foo', 'ms', 1)
ScalarValue(page1, 'foo', 'ms', 1)
ScalarValue(page1, 'foo', 'ms', 1)
ScalarValue(page2, 'foo', 'ms', 2)
Then the summarizer will produce two sets of values. First,
computed_per_page_values:
[
ListOfScalarValues(page1, 'foo', 'ms', [1, 1, 1]),
ListOfScalarValues(page2, 'foo', 'ms', [2])
]
In addition, it will produce a summary value:
[
ListOfScalarValues(None, 'foo', 'ms', [1, 1, 1, 2])  # page is None for the summary
]
"""
def __init__(self, all_page_specific_values):
had_failures = any(isinstance(v, failure.FailureValue) for v in
all_page_specific_values)
self.had_failures = had_failures
self._computed_per_page_values = []
self._computed_summary_values = []
self._interleaved_computed_per_page_values_and_summaries = []
self._ComputePerPageValues(all_page_specific_values)
@property
def computed_per_page_values(self):
return self._computed_per_page_values
@property
def computed_summary_values(self):
return self._computed_summary_values
@property
def interleaved_computed_per_page_values_and_summaries(self):
"""Returns the computed per page values and summary values interleaved.
All the results for a given name are printed together. First per page
values, then summary values.
"""
return self._interleaved_computed_per_page_values_and_summaries
def _ComputePerPageValues(self, all_page_specific_values):
all_successful_page_values = [
v for v in all_page_specific_values if not (isinstance(
v, failure.FailureValue) or isinstance(v, skip.SkipValue))]
# We will later need to determine how many values were originally created
# for each value name, to apply a workaround meant to clean up the printf
# output.
num_successful_pages_for_value_name = defaultdict(int)
for v in all_successful_page_values:
num_successful_pages_for_value_name[v.name] += 1
# By here, due to page repeat options, all_successful_page_values
# contains values of the same name not only from multiple pages, but also
# from repeated runs of the same page. So even if only one page ran, it may
# have run twice, producing two 'x' values.
#
# So, get rid of the repeated pages by merging.
merged_page_values = merge_values.MergeLikeValuesFromSamePage(
all_successful_page_values)
# Now we have a bunch of values, but there is only one value_name per page.
# Suppose page1 and page2 ran, producing values x and y. We want to print
# x for page1
# x for page2
# x for page1, page2 combined
#
# y for page1
# y for page2
# y for page1, page2 combined
#
# We already have the x values in the values array. But, we will need
# them indexable by the value name.
#
# The following dict maps value_name -> list of pages that have values of
# that name.
per_page_values_by_value_name = defaultdict(list)
for value in merged_page_values:
per_page_values_by_value_name[value.name].append(value)
# We already have the x values in the values array. But, we also need
# the values merged across the pages. And, we will need them indexed by
# value name so that we can find them when printing out value names in
# alphabetical order.
merged_pages_value_by_value_name = {}
if not self.had_failures:
for value in merge_values.MergeLikeValuesFromDifferentPages(
all_successful_page_values):
assert value.name not in merged_pages_value_by_value_name
merged_pages_value_by_value_name[value.name] = value
# sorted_value_names will govern the order in which values are printed.
value_names = set([v.name for v in merged_page_values])
sorted_value_names = sorted(value_names)
# Time to walk through the values by name, printing first the page-specific
# values and then the merged_site value.
for value_name in sorted_value_names:
per_page_values = per_page_values_by_value_name.get(value_name, [])
# Sort the values by their page's display name
sorted_per_page_values = list(per_page_values)
sorted_per_page_values.sort(
key=lambda per_page_values: per_page_values.page.display_name)
# Output the page-specific results.
num_successful_pages_for_this_value_name = (
num_successful_pages_for_value_name[value_name])
for per_page_value in sorted_per_page_values:
self._ComputePerPageValue(per_page_value,
num_successful_pages_for_this_value_name)
# Output the combined values.
merged_pages_value = merged_pages_value_by_value_name.get(value_name,
None)
if merged_pages_value:
self._computed_summary_values.append(merged_pages_value)
self._interleaved_computed_per_page_values_and_summaries.append(
merged_pages_value)
def _ComputePerPageValue(
self, value, num_successful_pages_for_this_value_name):
# If there were any page errors, we typically will print nothing.
#
# Note: this branch is structured less-densely to improve legibility.
if num_successful_pages_for_this_value_name > 1:
should_print = True
elif (self.had_failures and
num_successful_pages_for_this_value_name == 1):
should_print = True
else:
should_print = False
if not should_print:
return
# Actually save the result.
self._computed_per_page_values.append(value)
self._interleaved_computed_per_page_values_and_summaries.append(value)
| bsd-3-clause |
ayseyo/oclapi | django-nonrel/ocl/mappings/tests.py | 1 | 46964 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import os
from unittest import skip
from urlparse import urlparse
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.test import Client
from django.test.client import MULTIPART_CONTENT, FakePayload
from django.utils.encoding import force_str
from collection.models import Collection, CollectionVersion
from concepts.models import Concept, LocalizedText
from mappings.models import Mapping, MappingVersion
from mappings.validation_messages import OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS
from oclapi.models import ACCESS_TYPE_EDIT, ACCESS_TYPE_VIEW, CUSTOM_VALIDATION_SCHEMA_OPENMRS
from oclapi.utils import add_user_to_org
from orgs.models import Organization
from sources.models import Source, SourceVersion
from test_helper.base import *
from users.models import UserProfile
class OCLClient(Client):
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using PUT.
"""
response = self.my_put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def my_put(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"Construct a PUT request."
post_data = self._encode_data(data, content_type)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_str(parsed[4]),
'REQUEST_METHOD': str('PUT'),
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
class MappingBaseTest(OclApiBaseTestCase):
def setUp(self):
super(MappingBaseTest, self).setUp()
self.user1 = User.objects.create_user(
username='user1',
email='[email protected]',
password='user1',
last_name='One',
first_name='User'
)
self.user2 = User.objects.create_user(
username='user2',
email='[email protected]',
password='user2',
last_name='Two',
first_name='User'
)
self.userprofile1 = UserProfile.objects.create(user=self.user1, mnemonic='user1')
self.userprofile2 = UserProfile.objects.create(user=self.user2, mnemonic='user2')
self.org1 = Organization.objects.create(name='org1', mnemonic='org1')
self.org2 = Organization.objects.create(name='org2', mnemonic='org2')
add_user_to_org(self.userprofile2, self.org2)
self.source1 = Source(
name='source1',
mnemonic='source1',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source',
)
kwargs = {
'parent_resource': self.userprofile1
}
Source.persist_new(self.source1, self.user1, **kwargs)
self.source1 = Source.objects.get(id=self.source1.id)
self.source2 = Source(
name='source2',
mnemonic='source2',
full_name='Source Two',
source_type='Reference',
public_access=ACCESS_TYPE_VIEW,
default_locale='fr',
supported_locales=['fr'],
website='www.source2.com',
description='This is the second test source',
)
kwargs = {
'parent_resource': self.org2,
}
Source.persist_new(self.source2, self.user1, **kwargs)
self.source2 = Source.objects.get(id=self.source2.id)
self.name = LocalizedText.objects.create(name='Fred', locale='en', type='FULLY_SPECIFIED')
(self.concept1, errors) = create_concept(mnemonic='concept1', user=self.user1, source=self.source1, names=[self.name], descriptions=[self.name])
(self.concept2, errors) = create_concept(mnemonic='concept2', user=self.user1, source=self.source1, names=[self.name], descriptions=[self.name])
(self.concept3, errors) = create_concept(mnemonic='concept3', user=self.user1, source=self.source2, names=[self.name], descriptions=[self.name])
(self.concept4, errors) = create_concept(mnemonic='concept4', user=self.user1, source=self.source2, names=[self.name], descriptions=[self.name])
class MappingVersionBaseTest(MappingBaseTest):
def setUp(self):
super(MappingVersionBaseTest, self).setUp()
self.mapping1 = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='versionmapping1',
)
self.mapping1.full_clean()
self.mapping1.save()
class MappingTest(MappingBaseTest):
def test_create_mapping_positive(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(ACCESS_TYPE_VIEW, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertEquals(self.concept2, mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping.get_to_concept_name())
def test_create_mapping_negative__no_created_by(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_updated_by(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_parent(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
errors = Mapping.persist_new(mapping, self.user1)
self.assertTrue(errors)
def test_create_mapping_negative__no_map_type(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_from_concept(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_to_concept(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__two_to_concepts(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
to_concept_code='code',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__self_mapping(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept1,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__same_mapping_type1(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__same_mapping_type2(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_source=self.source1,
to_concept_code='code',
to_concept_name='name',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(ACCESS_TYPE_VIEW, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertIsNone(mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals('code', mapping.get_to_concept_code())
self.assertEquals('name', mapping.get_to_concept_name())
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
with self.assertRaises(IntegrityError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_source=self.source1,
to_concept_code='code',
to_concept_name='name',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_mapping_access_changes_with_source(self):
public_access = self.source1.public_access
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
public_access=public_access,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertEquals(self.source1.public_access, mapping.public_access)
self.source1.public_access = ACCESS_TYPE_VIEW
self.source1.save()
mapping = Mapping.objects.get(id=mapping.id)
self.assertNotEquals(public_access, self.source1.public_access)
self.assertEquals(self.source1.public_access, mapping.public_access)
@skip('Skip this test until development of map type validation feature is complete.')
def test_create_mapping_negative__invalid_mapping_type(self):
maptypes_source = Source.objects.get(name="MapTypes")
create_concept(self.user1, maptypes_source, concept_class="MapType",names=[create_localized_text("SAME-AS")])
create_concept(self.user1, maptypes_source, concept_class="MapType",names=[create_localized_text("NARROWER-THAN")])
user = create_user()
source = create_source(user)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='XYZQWERT',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertEquals(1, len(errors))
self.assertEquals(errors['names'][0], 'Mapping type should be valid attribute')
@skip('Skip this test until development of map type validation feature is complete.')
def test_create_mapping_positive__valid_mapping_type(self):
maptypes_source = Source.objects.get(name="MapTypes")
create_concept(self.user1, maptypes_source, concept_class="MapType", names=[create_localized_text("SAME-AS")])
create_concept(self.user1, maptypes_source, concept_class="MapType", names=[create_localized_text("NARROWER-THAN")])
user = create_user()
source = create_source(user)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='SAME-AS',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertEquals(0, len(errors))
class MappingVersionTest(MappingVersionBaseTest):
def test_create_mapping_positive(self):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mappingversion1',
versioned_object_id=self.mapping1.id,
mnemonic='tempid',
versioned_object_type=ContentType.objects.get_for_model(Mapping),
)
mapping_version.full_clean()
mapping_version.save()
self.assertTrue(MappingVersion.objects.filter(versioned_object_id = self.mapping1.id).exists())
mapping_version = MappingVersion.objects.get(versioned_object_id = self.mapping1.id)
self.assertEquals(ACCESS_TYPE_VIEW, mapping_version.public_access)
self.assertEquals('user1', mapping_version.created_by)
self.assertEquals('user1', mapping_version.updated_by)
self.assertEquals(self.source1, mapping_version.parent)
self.assertEquals('Same As', mapping_version.map_type)
self.assertEquals(self.concept1, mapping_version.from_concept)
self.assertEquals(self.concept2, mapping_version.to_concept)
self.assertEquals(self.source1, mapping_version.from_source)
self.assertEquals(self.source1.owner_name, mapping_version.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping_version.from_source_name)
self.assertEquals(self.source1, mapping_version.get_to_source())
self.assertEquals(self.source1.owner_name, mapping_version.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping_version.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping_version.get_to_concept_name())
def test_create_mapping_negative__no_created_by(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping111',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_updated_by(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_parent(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_map_type(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_from_concept(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_version_object(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_mapping_access_changes_with_source(self):
public_access = self.source1.public_access
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
public_access=public_access,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
self.assertEquals(self.source1.public_access, mapping_version.public_access)
self.source1.public_access = ACCESS_TYPE_VIEW
self.source1.save()
def test_collections_ids(self):
kwargs = {
'parent_resource': self.userprofile1
}
collection = Collection(
name='collection2',
mnemonic='collection2',
full_name='Collection Two',
collection_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.collection2.com',
description='This is the second test collection'
)
Collection.persist_new(collection, self.user1, **kwargs)
source = Source(
name='source',
mnemonic='source',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source'
)
kwargs = {
'parent_resource': self.org1
}
Source.persist_new(source, self.user1, **kwargs)
(concept1, errors) = create_concept(mnemonic='concept12', user=self.user1, source=source)
(from_concept, errors) = create_concept(mnemonic='fromConcept', user=self.user1, source=source)
(to_concept, errors) = create_concept(mnemonic='toConcept', user=self.user1, source=source)
mapping = Mapping(
map_type='Same As',
from_concept=from_concept,
to_concept=to_concept,
external_id='mapping',
)
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
initial_mapping_version = MappingVersion.objects.get(versioned_object_id=mapping.id)
new_mapping_version = MappingVersion.for_mapping(mapping)
new_mapping_version.mnemonic = 98
new_mapping_version.save()
from_concept_reference = '/orgs/org1/sources/source/concepts/' + Concept.objects.get(mnemonic=from_concept.mnemonic).mnemonic + '/'
concept1_reference = '/orgs/org1/sources/source/concepts/' + Concept.objects.get(mnemonic=concept1.mnemonic).mnemonic + '/'
mapping = Mapping.objects.filter()[1]
references = [concept1_reference, from_concept_reference, mapping.uri, initial_mapping_version.uri]
collection.expressions = references
collection.full_clean()
collection.save()
self.assertEquals(initial_mapping_version.collection_ids, [collection.id])
self.assertEquals(new_mapping_version.collection_ids, [collection.id])
def test_collections_version_ids(self):
kwargs = {
'parent_resource': self.userprofile1
}
collection = Collection(
name='collection2',
mnemonic='collection2',
full_name='Collection Two',
collection_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.collection2.com',
description='This is the second test collection'
)
Collection.persist_new(collection, self.user1, **kwargs)
source = Source(
name='source',
mnemonic='source',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source'
)
kwargs = {
'parent_resource': self.org1
}
Source.persist_new(source, self.user1, **kwargs)
(concept1, errors) = create_concept(mnemonic='concept12', user=self.user1, source=source)
(from_concept, errors) = create_concept(mnemonic='fromConcept', user=self.user1, source=source)
(to_concept, errors) = create_concept(mnemonic='toConcept', user=self.user1, source=source)
mapping = Mapping(
map_type='Same As',
from_concept=from_concept,
to_concept=to_concept,
external_id='mapping',
)
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.filter()[1]
mapping_reference = '/orgs/org1/sources/source/mappings/' + mapping.id + '/'
references = [mapping_reference]
collection.expressions = references
collection.full_clean()
collection.save()
mapping_version = MappingVersion.objects.filter()[0]
collection_version = CollectionVersion(
name='version1',
mnemonic='version1',
versioned_object=collection,
released=True,
created_by=self.user1,
updated_by=self.user1,
mappings=[mapping_version.id]
)
collection_version.full_clean()
collection_version.save()
self.assertEquals(mapping_version.collection_version_ids, [collection_version.id])
class MappingClassMethodsTest(MappingBaseTest):
def test_persist_new_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(self.source1.public_access, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertEquals(self.concept2, mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping.get_to_concept_name())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_version_created_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(MappingVersion.objects.filter(versioned_object_id=mapping.id, is_latest_version=True).exists())
mapping_version = MappingVersion.objects.get(versioned_object_id=mapping.id, is_latest_version=True)
self.assertEquals(self.source1.public_access, mapping_version.public_access)
self.assertEquals('user1', mapping_version.created_by)
self.assertEquals('user1', mapping_version.updated_by)
self.assertEquals(self.source1, mapping_version.parent)
self.assertEquals('Same As', mapping_version.map_type)
self.assertEquals(self.concept1, mapping_version.from_concept)
self.assertEquals(self.concept2, mapping_version.to_concept)
self.assertEquals(self.source1, mapping_version.from_source)
self.assertEquals(self.source1.owner_name, mapping_version.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping_version.from_source_name)
self.assertEquals(self.source1, mapping_version.get_to_source())
self.assertEquals(self.source1.owner_name, mapping_version.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping_version.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping_version.get_to_concept_name())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_negative__no_creator(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, None, **kwargs)
self.assertEquals(1, len(errors))
self.assertTrue('non_field_errors' in errors)
non_field_errors = errors['non_field_errors']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('creator' in non_field_errors[0])
self.assertFalse(Mapping.objects.filter(external_id='mapping1').exists())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(0, len(source_version.mappings))
def test_persist_new_negative__no_parent(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(1, len(errors))
self.assertTrue('non_field_errors' in errors)
non_field_errors = errors['non_field_errors']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('parent' in non_field_errors[0])
self.assertFalse(Mapping.objects.filter(external_id='mapping1').exists())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(0, len(source_version.mappings))
def test_persist_new_negative__same_mapping(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
mv = MappingVersion.objects.get(versioned_object_id=mapping.id)
self.assertTrue(mv.id in source_version.mappings)
# Repeat with same concepts
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping2',
)
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(1, len(errors))
self.assertEquals(1, len(errors))
self.assertTrue('__all__' in errors)
non_field_errors = errors['__all__']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('already exists' in non_field_errors[0])
self.assertEquals(1, len(source_version.mappings))
def test_persist_new_positive__same_mapping_different_source(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
# Repeat with same concepts
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping2',
)
kwargs = {
'parent_resource': self.source2,
}
source_version = SourceVersion.get_latest_version_of(self.source2)
self.assertEquals(0, len(source_version.mappings))
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping2').exists())
mapping = Mapping.objects.get(external_id='mapping2')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_positive__earlier_source_version(self):
version1 = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(version1.mappings))
version2 = SourceVersion.for_base_object(self.source1, label='version2')
version2.save()
self.assertEquals(0, len(version2.mappings))
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
kwargs = {
'parent_resource': self.source1,
'parent_resource_version': version1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
version1 = SourceVersion.objects.get(id=version1.id)
self.assertEquals(1, len(version1.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in version1.mappings)
version2 = SourceVersion.objects.get(id=version2.id)
self.assertEquals(0, len(version2.mappings))
latest_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(latest_version.mappings))
def test_persist_persist_changes_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
to_concept = mapping.to_concept
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
mapping.to_concept = self.concept3
errors = Mapping.persist_changes(mapping, self.user1)
self.assertEquals(0, len(errors))
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(self.concept3, mapping.to_concept)
self.assertNotEquals(to_concept, mapping.to_concept)
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
mv = MappingVersion.objects.filter(versioned_object_id=mapping.id)
self.assertTrue(mv[1].id in source_version.mappings)
def test_persist_persist_changes_negative__no_updated_by(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
mapping.to_concept = self.concept3
errors = Mapping.persist_changes(mapping, None)
self.assertEquals(1, len(errors))
self.assertTrue('updated_by' in errors)
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_retire_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertFalse(mapping.retired)
Mapping.retire(mapping, self.user1)
self.assertTrue(mapping.retired)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
mappingVersion=MappingVersion.objects.get(versioned_object_id=mapping.mnemonic, mnemonic=2)
self.assertTrue(mappingVersion.retired)
def test_retire_negative(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
retired=True,
)
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
result=Mapping.retire(mapping, self.user1)
self.assertFalse(result)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
def test_edit_mapping_make_new_version_positive(self):
mapping1 = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping1, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertEquals(1,len(MappingVersion.objects.filter(versioned_object_id=mapping1.id)))
mapping1.map_type='BROADER_THAN'
Mapping.persist_changes(mapping1, self.user1)
self.assertEquals(2, len(MappingVersion.objects.filter(versioned_object_id=mapping1.id)))
old_version = MappingVersion.objects.get(versioned_object_id=mapping1.id, is_latest_version=False)
new_version= MappingVersion.objects.get(versioned_object_id=mapping1.id, is_latest_version=True)
self.assertFalse(old_version.is_latest_version)
self.assertTrue(new_version.is_latest_version)
self.assertEquals(new_version.map_type,'BROADER_THAN')
self.assertEquals(old_version.map_type,'Same As')
class OpenMRSMappingValidationTest(MappingBaseTest):
def test_create_same_from_and_to_pair_with_different_map_types_should_throw_validation_error(self):
user = create_user()
source = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
create_mapping(user, source, concept1, concept2, "Same As")
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='Is Subset of',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertTrue(OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS in errors["__all__"])
def test_update_different_from_and_to_pairs_to_same_from_and_to_pairs_should_throw_validation_error(self):
user = create_user()
source1 = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
source2 = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user, source1)
(concept2, _) = create_concept(user, source2)
(concept3, _) = create_concept(user, source2)
create_mapping(user, source1, concept1, concept2, "Same As")
mapping = create_mapping(user, source1, concept2, concept3, "Same As")
mapping.from_concept = concept1
mapping.to_concept = concept2
mapping.map_type = "Different"
errors = Mapping.persist_changes(mapping, user)
self.assertTrue(
OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS in errors["__all__"])
| mpl-2.0 |
ajose01/rethinkdb | test/rql_test/connections/http_support/werkzeug/contrib/iterio.py | 147 | 10718 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
    The other way round works a bit differently because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
from werkzeug._compat import implements_iterator
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b'\n'
return u'\n'
@implements_iterator
class IterIO(object):
"""Instances of this object implement an interface compatible with the
standard Python :class:`file` object. Streams are either read-only or
write-only depending on how the object is created.
If the first argument is an iterable a file like object is returned that
returns the contents of the iterable. In case the iterable is empty
read operations will return the sentinel value.
If the first argument is a callable then the stream object will be
created and passed to that function. The caller itself however will
    not receive a stream but an iterable. The function will be executed
step by step as something iterates over the returned iterable. Each
call to :meth:`flush` will create an item for the iterable. If
:meth:`flush` is called without any writes in-between the sentinel
value will be yielded.
Note for Python 3: due to the incompatible interface of bytes and
streams you should set the sentinel value explicitly to an empty
bytestring (``b''``) if you are expecting to deal with bytes as
otherwise the end of the stream is marked with the wrong sentinel
value.
.. versionadded:: 0.9
`sentinel` parameter was added.
"""
def __new__(cls, obj, sentinel=''):
try:
iterator = iter(obj)
except TypeError:
return IterI(obj, sentinel)
return IterO(iterator, sentinel)
def __iter__(self):
return self
def tell(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return self.pos
def isatty(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return False
def seek(self, pos, mode=0):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def truncate(self, size=None):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def write(self, s):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def writelines(self, list):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def readlines(self, sizehint=0):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def readline(self, length=None):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def __next__(self):
if self.closed:
raise StopIteration()
line = self.readline()
if not line:
raise StopIteration()
return line
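# --- Illustrative sketch, not part of the original module -----------------
# A minimal use of the read-only direction described in the IterIO class
# docstring above: wrap a plain iterator of byte strings and consume it
# through the file-like API.  Names below are purely for illustration.
def _demo_iterio_read():
    stream = IterIO(iter([b"hello ", b"iter", b"io"]), sentinel=b'')
    head = stream.read(6)    # b'hello '
    tail = stream.read()     # b'iterio'
    return head, tail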
class IterI(IterIO):
"""Convert an stream into an iterator."""
def __new__(cls, func, sentinel=''):
if greenlet is None:
raise RuntimeError('IterI requires greenlet support')
stream = object.__new__(cls)
stream._parent = greenlet.getcurrent()
stream._buffer = []
stream.closed = False
stream.sentinel = sentinel
stream.pos = 0
def run():
func(stream)
stream.close()
g = greenlet.greenlet(run, stream._parent)
while 1:
rv = g.switch()
if not rv:
return
yield rv[0]
def close(self):
if not self.closed:
self.closed = True
self._flush_impl()
def write(self, s):
if self.closed:
raise ValueError('I/O operation on closed file')
if s:
self.pos += len(s)
self._buffer.append(s)
def writelines(self, list):
for item in list:
self.write(item)
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
self._flush_impl()
def _flush_impl(self):
data = _mixed_join(self._buffer, self.sentinel)
self._buffer = []
if not data and self.closed:
self._parent.switch()
else:
self._parent.switch((data,))
class IterO(IterIO):
"""Iter output. Wrap an iterator and give it a stream like interface."""
def __new__(cls, gen, sentinel=''):
self = object.__new__(cls)
self._gen = gen
self._buf = None
self.sentinel = sentinel
self.closed = False
self.pos = 0
return self
def __iter__(self):
return self
def _buf_append(self, string):
'''Replace string directly without appending to an empty string,
avoiding type issues.'''
if not self._buf:
self._buf = string
else:
self._buf += string
def close(self):
if not self.closed:
self.closed = True
if hasattr(self._gen, 'close'):
self._gen.close()
def seek(self, pos, mode=0):
if self.closed:
raise ValueError('I/O operation on closed file')
if mode == 1:
pos += self.pos
elif mode == 2:
self.read()
self.pos = min(self.pos, self.pos + pos)
return
elif mode != 0:
raise IOError('Invalid argument')
buf = []
try:
tmp_end_pos = len(self._buf)
while pos > tmp_end_pos:
                item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
self.pos = max(0, pos)
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
if n < 0:
self._buf_append(_mixed_join(self._gen, self.sentinel))
result = self._buf[self.pos:]
self.pos += len(result)
return result
new_pos = self.pos + n
buf = []
try:
tmp_end_pos = 0 if self._buf is None else len(self._buf)
while new_pos > tmp_end_pos or (self._buf is None and not buf):
item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
new_pos = max(0, new_pos)
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readline(self, length=None):
if self.closed:
raise ValueError('I/O operation on closed file')
nl_pos = -1
if self._buf:
nl_pos = self._buf.find(_newline(self._buf), self.pos)
buf = []
try:
pos = self.pos
while nl_pos < 0:
item = next(self._gen)
local_pos = item.find(_newline(item))
buf.append(item)
if local_pos >= 0:
nl_pos = pos + local_pos
break
pos += len(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
if nl_pos < 0:
new_pos = len(self._buf)
else:
new_pos = nl_pos + 1
if length is not None and self.pos + length < new_pos:
new_pos = self.pos + length
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
| agpl-3.0 |
denny820909/builder | lib/python2.7/site-packages/python_dateutil-1.5-py2.7.egg/dateutil/tz.py | 270 | 32741 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
`self._name`,
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
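# --- Illustrative sketch, not part of the original module -----------------
# The comment block inside tzlocal._isdst() above derives a POSIX-like
# timestamp by hand (from the proleptic ordinal) instead of relying on the
# unstable time.mktime().  The same arithmetic as a standalone helper, for
# a naive datetime `dt`:
def _demo_wall_timestamp(dt):
    return ((dt.toordinal() - EPOCHORDINAL) * 86400
            + dt.hour * 3600
            + dt.minute * 60
            + dt.second)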
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
self._filename = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._filename`)
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError, "Unpickable %s class" % self.__class__.__name__
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year,1,1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError, "unknown string format"
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
if tzoffsetto is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            name = name[1:]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ','_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
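# --- Illustrative sketch, not part of the original module -----------------
# gettz() above resolves a zone name via the TZ variable, the zoneinfo
# search paths, and finally the tzstr()/tzutc() fall-backs.  Two simple,
# purely illustrative look-ups:
def _demo_gettz():
    local = gettz()        # honours the TZ environment variable if set
    utc = gettz("UTC")     # a zoneinfo file if available, else tzutc()
    return local, utc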
# vim:ts=4:sw=4:et
| mit |
fuzeman/Catalytic | deluge/ui/console/commands/help.py | 8 | 3063 | # help.py
#
# Copyright (C) 2008-2009 Ido Abramovich <[email protected]>
# Copyright (C) 2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from twisted.internet import defer
from deluge.ui.console.main import BaseCommand
import deluge.ui.console.colors as colors
import deluge.component as component
class Command(BaseCommand):
"""displays help on other commands"""
usage = "Usage: help [command]"
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
self._commands = self.console._commands
deferred = defer.succeed(True)
if args:
if len(args) > 1:
                self.console.write(self.usage)
return deferred
try:
cmd = self._commands[args[0]]
except KeyError:
self.console.write("{!error!}Unknown command %r" % args[0])
return deferred
try:
parser = cmd.create_parser()
self.console.write(parser.format_help())
except AttributeError, e:
self.console.write(cmd.__doc__ or 'No help for this command')
else:
max_length = max( len(k) for k in self._commands)
self.console.set_batch_write(True)
for cmd in sorted(self._commands):
self.console.write("{!info!}" + cmd + "{!input!} - " + self._commands[cmd].__doc__ or '')
self.console.write(" ")
self.console.write('For help on a specific command, use "<command> --help"')
self.console.set_batch_write(False)
return deferred
def complete(self, line):
return [x for x in component.get("ConsoleUI")._commands if x.startswith(line)]
| gpl-3.0 |
tcffisher/namebench | libnamebench/better_webbrowser.py | 175 | 4191 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = '[email protected] (Thomas Stromberg)'
import os.path
import subprocess
import sys
import traceback
import webbrowser
import util
def output(string):
print string
def create_win32_http_cmd(url):
"""Create a command-line tuple to launch a web browser for a given URL.
Args:
url: string
Returns:
tuple of: (executable, arg1, arg2, ...)
At the moment, this ignores all default arguments to the browser.
TODO(tstromberg): Properly parse the command-line arguments.
"""
browser_type = None
try:
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
'Software\Classes\http\shell\open\command')
browser_type = 'user'
except WindowsError:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'Software\Classes\http\shell\open\command')
browser_type = 'machine'
except:
return False
cmd = _winreg.EnumValue(key, 0)[1]
# "C:\blah blah\iexplore.exe" -nohome
# "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
if '"' in cmd:
executable = cmd.split('"')[1]
else:
executable = cmd.split(' ')[0]
if not os.path.exists(executable):
output('$ Default HTTP browser does not exist: %s' % executable)
return False
else:
output('$ %s HTTP handler: %s' % (browser_type, executable))
return (executable, url)
def open(url):
"""Opens a URL, overriding the normal webbrowser.open methods for sanity."""
try:
webbrowser.open(url, new=1, autoraise=True)
# If the user is missing the osascript binary - see
# http://code.google.com/p/namebench/issues/detail?id=88
except:
output('Failed to open: [%s]: %s' % (url, util.GetLastExceptionString()))
if os.path.exists('/usr/bin/open'):
try:
output('trying open: %s' % url)
p = subprocess.Popen(('open', url))
p.wait()
except:
output('open did not seem to work: %s' % util.GetLastExceptionString())
elif sys.platform[:3] == 'win':
try:
output('trying default Windows controller: %s' % url)
controller = webbrowser.get('windows-default')
controller.open_new(url)
except:
output('WindowsController did not work: %s' % util.GetLastExceptionString())
# *NOTE*: EVIL IMPORT SIDE EFFECTS AHEAD!
#
# If we are running on Windows, register the WindowsHttpDefault class.
if sys.platform[:3] == 'win':
import _winreg
# We don't want to load this class by default, because Python 2.4 doesn't have BaseBrowser.
class WindowsHttpDefault(webbrowser.BaseBrowser):
"""Provide an alternate open class for Windows user, using the http handler."""
def open(self, url, new=0, autoraise=1):
command_args = create_win32_http_cmd(url)
if not command_args:
output('$ Could not find HTTP handler')
return False
output('command_args:')
output(command_args)
# Avoid some unicode path issues by moving our current directory
old_pwd = os.getcwd()
os.chdir('C:\\')
try:
_unused = subprocess.Popen(command_args)
os.chdir(old_pwd)
return True
except:
traceback.print_exc()
output('$ Failed to run HTTP handler, trying next browser.')
os.chdir(old_pwd)
return False
webbrowser.register('windows-http', WindowsHttpDefault, update_tryorder=-1)
| apache-2.0 |
aesteve/vertx-web | vertx-web/src/test/sockjs-protocol/ws4py/__init__.py | 4 | 2689 | # -*- coding: utf-8 -*-
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of ws4py nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import logging.handlers as handlers
__author__ = "Sylvain Hellegouarch"
__version__ = "0.5.1"
__all__ = ['WS_KEY', 'WS_VERSION', 'configure_logger', 'format_addresses']
WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
WS_VERSION = (8, 13)
def configure_logger(stdout=True, filepath=None, level=logging.INFO):
logger = logging.getLogger('ws4py')
logger.setLevel(level)
logfmt = logging.Formatter("[%(asctime)s] %(levelname)s %(message)s")
if filepath:
h = handlers.RotatingFileHandler(filepath, maxBytes=10485760, backupCount=3)
h.setLevel(level)
h.setFormatter(logfmt)
logger.addHandler(h)
if stdout:
import sys
h = logging.StreamHandler(sys.stdout)
h.setLevel(level)
h.setFormatter(logfmt)
logger.addHandler(h)
return logger
def format_addresses(ws):
me = ws.local_address
peer = ws.peer_address
if isinstance(me, tuple) and isinstance(peer, tuple):
me_ip, me_port = ws.local_address
peer_ip, peer_port = ws.peer_address
return "[Local => %s:%d | Remote => %s:%d]" % (me_ip, me_port, peer_ip, peer_port)
return "[Bound to '%s']" % me
| apache-2.0 |
yyamano/RESTx | src/python/restxclient/restx_resource.py | 2 | 5575 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Definition of the L{RestxResource} class.
"""
from restxclient.restx_client_exception import RestxClientException
from restxclient.restx_parameter import RestxParameter
from restxclient.restx_service import RestxAccessibleService
class RestxResource(object):
"""
Represents information about a resource on a RESTx server.
This representation can be used by clients to find out about
component capabilities and also as a starting point to create
new resources, by utilizing the get_resource_template() function.
"""
# The keys to the component's meta data dictionary.
__NAME_KEY = "name"
__DESC_KEY = "desc"
__URI_KEY = "uri"
__SERVICES_KEY = "services"
__server = None # Reference to the server on which we reside (RestxServer)
__name = None # Name of this resource
__description = None # Description of this resource
__uri = None # URI of this resource
__services = None # Dictionary of service definitions
def __init__(self, server, rdesc):
"""
        Create a new resource representation in memory.
@param server: The RESTx server on which the resource resides.
@type server: L{RestxServer}
@param rdesc: Dictionary describing the server resource. This
is the dictionary returned by the server when a
                      resource URI is accessed.
@type rdesc: dict
"""
self.__server = server
try:
self.__name = rdesc[self.__NAME_KEY]
self.__description = rdesc[self.__DESC_KEY]
self.__uri = rdesc[self.__URI_KEY]
sdict = rdesc[self.__SERVICES_KEY]
# Parse the service dictionary and attempt to translate
# this to a dictionary of proper RestxAccessibleService objects.
self.__services = dict()
for sname, sdef in sdict.items():
self.__services[sname] = RestxAccessibleService(self, sname, sdef)
except KeyError, e:
raise RestxClientException("Server error: Expected key '%s' missing in definition of resource '%s'." % (str(e), self.__name))
def __str__(self):
"""
Return a string representation of this resource.
"""
buf = \
"""RestxResource: %s
Description: %s
URI: %s
Services:""" % (self.__name, self.__description, self.__uri)
if self.__services:
for sname, sdef in self.__services.items():
buf += "\n----------------\n" + str(sdef)
return buf
def get_name(self):
"""
Return the name of the resource.
@return: Name of resource.
@rtype: string
"""
return self.__name
def get_description(self):
"""
Return the description of the resource.
@return: Description of the resource.
@rtype: string
"""
return self.__description
def get_uri(self):
"""
Return the URI of the resource.
@return: URI of the resource.
@rtype: string
"""
return self.__uri
def get_server(self):
"""
Return the L{RestxServer} object of the server on which this resource lives.
@return: The server of this resource.
@rtype: L{RestxServer}
"""
return self.__server
def get_all_services(self):
"""
Return all services defined for this resource.
@return: Dictionary of all services.
@rtype: dict of L{RestxAccessibleService}
"""
return self.__services
def get_service(self, name):
"""
Return one service of this resource.
@param name: Name of the service.
@type name: string
@return: Dictionary of service definition.
@rtype: L{RestxAccessibleService}
"""
try:
return self.__services[name]
except KeyError:
raise RestxClientException("Service '%s' not defined." % name)
def delete(self):
"""
Delete the resource on the server.
"""
self.__server._send(self.__uri, method="DELETE", status=200)
#
# For convenience, we offer read access to several
# elements via properties.
#
name = property(get_name, None)
description = property(get_description, None)
uri = property(get_uri, None)
server = property(get_server, None)
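# --- Illustrative sketch, not part of the original module -----------------
# How a client might inspect an already-constructed RestxResource using only
# the accessors defined above (the `resource` argument is assumed to come
# from elsewhere in the client library):
def _demo_list_services(resource):
    print "Resource %s at %s" % (resource.name, resource.uri)
    for service_name in resource.get_all_services():
        print "  service: %s" % service_name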
| gpl-3.0 |
provaleks/o8 | addons/report_webkit/__init__.py | 382 | 1593 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import header
import company
import report_helper
import webkit_report
import ir_report
import wizard
import convert
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/vitrage | vitrage/evaluator/actions/priority_tools.py | 1 | 1972 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import VertexProperties as VProps
from vitrage.entity_graph.mappings.datasource_info_mapper \
import DEFAULT_INFO_MAPPER
from vitrage.evaluator.template_fields import TemplateFields
class BaselineTools(object):
@staticmethod
def get_score(action_info):
return 1 # no priorities
@classmethod
def get_extra_info(cls, action_specs):
return None
class RaiseAlarmTools(object):
def __init__(self, scores):
self.scores = scores
def get_score(self, action_info):
severity = action_info.specs.properties[TemplateFields.SEVERITY]
return self.scores.get(severity.upper(), 0)
@classmethod
def get_extra_info(cls, action_specs):
return action_specs.properties[TemplateFields.ALARM_NAME]
class SetStateTools(object):
def __init__(self, scores):
self.scores = scores
def get_score(self, action_info):
state = action_info.specs.properties[TemplateFields.STATE].upper()
target_resource = action_info.specs.targets[TemplateFields.TARGET]
target_vitrage_type = target_resource[VProps.VITRAGE_TYPE]
score_name = target_vitrage_type \
if target_vitrage_type in self.scores else DEFAULT_INFO_MAPPER
return self.scores[score_name].get(state, 0)
@classmethod
def get_extra_info(cls, action_specs):
return None
| apache-2.0 |
jasonmccampbell/scipy-refactor | scipy/sparse/linalg/dsolve/linsolve.py | 8 | 9161 | from warnings import warn
from numpy import asarray
from scipy.sparse import isspmatrix_csc, isspmatrix_csr, isspmatrix, \
SparseEfficiencyWarning, csc_matrix
import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
import umfpack
noScikit = True
isUmfpack = hasattr( umfpack, 'UMFPACK_OK' )
useUmfpack = True
__all__ = [ 'use_solver', 'spsolve', 'splu', 'spilu', 'factorized' ]
def use_solver( **kwargs ):
"""
Valid keyword arguments with defaults (other ignored):
useUmfpack = True
assumeSortedIndices = False
The default sparse solver is umfpack when available. This can be changed by
passing useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    you are sure that the matrix fulfills this, pass assumeSortedIndices=True
to gain some speed.
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
if isUmfpack:
umfpack.configure( **kwargs )
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b
"""
if isspmatrix( b ):
b = b.toarray()
if b.ndim > 1:
if max( b.shape ) == b.size:
b = b.squeeze()
else:
raise ValueError("rhs must be a vector (has shape %s)" % (b.shape,))
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires CSC or CSR matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
if M != b.size:
raise ValueError("matrix - rhs size mismatch (%s - %s)"
% (A.shape, b.size))
use_umfpack = use_umfpack and useUmfpack
if isUmfpack and use_umfpack:
if noScikit:
warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
' install scikits.umfpack instead', DeprecationWarning )
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
b = asarray(b, dtype=A.dtype).reshape(-1)
family = {'d' : 'di', 'D' : 'zi'}
umf = umfpack.UmfpackContext( family[A.dtype.char] )
return umf.linsolve( umfpack.UMFPACK_A, A, b,
autoTranspose = True )
else:
if isspmatrix_csc(A):
flag = 1 # CSC format
elif isspmatrix_csr(A):
flag = 0 # CSR format
else:
A = csc_matrix(A)
flag = 1
b = asarray(b, dtype=A.dtype)
options = dict(ColPerm=permc_spec)
return _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr, b, flag,
options=options)[0]
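# --- Illustrative sketch, not part of the original module -----------------
# Minimal use of spsolve() on a tiny CSC system, forcing the SuperLU path so
# the result does not depend on whether umfpack is installed:
def _demo_spsolve():
    import numpy as np
    A = csc_matrix(np.array([[3.0, 0.0], [1.0, 2.0]]))
    b = np.array([9.0, 8.0])
    return spsolve(A, b, use_umfpack=False)    # -> array([ 3. ,  2.5])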
def splu(A, permc_spec=None, diag_pivot_thresh=None,
drop_tol=None, relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A
Sparse matrix to factorize. Should be in CSR or CSC format.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [SLU]_
drop_tol : float, optional
(deprecated) No effect.
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [SLU]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [SLU]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [SLU]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") #is this true?
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=False, options=_options)
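# --- Illustrative sketch, not part of the original module -----------------
# The object returned by splu() exposes a solve() method (see the docstring
# above), so one factorization can serve several right-hand sides:
def _demo_splu():
    import numpy as np
    A = csc_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
    lu = splu(A)
    x1 = lu.solve(np.array([1.0, 2.0]))
    x2 = lu.solve(np.array([3.0, 5.0]))
    return x1, x2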
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix A.
The resulting object is an approximation to the inverse of A.
Parameters
----------
A
Sparse matrix to factorize
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
milu : str, optional
Which version of modified ILU to use. (Choices: ``silu``,
``smilu_1``, ``smilu_2`` (default), ``smilu_3``.)
Remaining other options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
    For a better approximation to the inverse, you may need to
increase ``fill_factor`` AND decrease ``drop_tol``.
This function uses the SuperLU library.
References
----------
.. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
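    Examples
    --------
    A minimal sketch (matrix values and the drop tolerance are illustrative)::
        import numpy as np
        from scipy.sparse import csc_matrix
        from scipy.sparse.linalg import spilu
        A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]])
        lu = spilu(A, drop_tol=1e-5)
        x = lu.solve(np.array([1., 2., 3.]))  # approximate solution of A * x = b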
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") #is this true?
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=True, options=_options)
def factorized( A ):
"""
    Return a function for solving a sparse linear system, with A pre-factorized.
Example:
solve = factorized( A ) # Makes LU decomposition.
x1 = solve( rhs1 ) # Uses the LU factors.
x2 = solve( rhs2 ) # Uses again the LU factors.
"""
if isUmfpack and useUmfpack:
if noScikit:
warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
' install scikits.umfpack instead', DeprecationWarning )
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
family = {'d' : 'di', 'D' : 'zi'}
umf = umfpack.UmfpackContext( family[A.dtype.char] )
# Make LU decomposition.
umf.numeric( A )
def solve( b ):
return umf.solve( umfpack.UMFPACK_A, A, b, autoTranspose = True )
return solve
else:
return splu( A ).solve
| bsd-3-clause |
Skoda091/alfred-deepl | lib/urllib3/fields.py | 288 | 5943 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
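    For example, ``guess_content_type('photo.png')`` typically returns
    ``'image/png'``, while an unknown extension falls back to `default`.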
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
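    For example, ``format_header_param('filename', u'report.txt')`` returns
    ``filename="report.txt"``; a value containing non-ASCII characters is
    instead RFC 2231-encoded into the ``filename*=utf-8''...`` form.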
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
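    A typical multipart field might be built as follows (the field name,
    data and filename are illustrative)::
        field = RequestField(name='upload', data=b'raw bytes', filename='data.bin')
        field.make_multipart(content_type='application/octet-stream')
        part_headers = field.render_headers()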
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
        This method sets the "Content-Disposition", "Content-Type" and
        "Content-Location" headers on the request field.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
| mit |
xidui/shadowsocks | shadowsocks/crypto/table.py | 1044 | 8108 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
cached_tables = {}
if hasattr(string, 'maketrans'):
maketrans = string.maketrans
translate = string.translate
else:
maketrans = bytes.maketrans
translate = bytes.translate
def get_table(key):
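    # Derive a deterministic 256-byte substitution table from the MD5 digest of
    # the key; init_table() pairs it with the inverse table for decryption.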
m = hashlib.md5()
m.update(key)
s = m.digest()
a, b = struct.unpack('<QQ', s)
table = maketrans(b'', b'')
table = [table[i: i + 1] for i in range(len(table))]
for i in range(1, 1024):
table.sort(key=lambda x: int(a % (ord(x) + i)))
return table
def init_table(key):
if key not in cached_tables:
encrypt_table = b''.join(get_table(key))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
class TableCipher(object):
def __init__(self, cipher_name, key, iv, op):
self._encrypt_table, self._decrypt_table = init_table(key)
self._op = op
def update(self, data):
if self._op:
return translate(data, self._encrypt_table)
else:
return translate(data, self._decrypt_table)
ciphers = {
'table': (0, 0, TableCipher)
}
def test_table_result():
from shadowsocks.common import ord
target1 = [
[60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
[151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
target2 = [
[124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
[117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
encrypt_table = b''.join(get_table(b'foobar!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target1[0][i] == ord(encrypt_table[i]))
assert (target1[1][i] == ord(decrypt_table[i]))
encrypt_table = b''.join(get_table(b'barfoo!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target2[0][i] == ord(encrypt_table[i]))
assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
from shadowsocks.crypto import util
cipher = TableCipher('table', b'test', b'', 1)
decipher = TableCipher('table', b'test', b'', 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_table_result()
test_encryption()
| apache-2.0 |
PennyQ/glue-3d-viewer | glue_vispy_viewers/extern/vispy/scene/cameras/turntable.py | 20 | 5029 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .perspective import Base3DRotationCamera
class TurntableCamera(Base3DRotationCamera):
""" 3D camera class that orbits around a center point while
maintaining a view on a center point.
For this camera, the ``scale_factor`` indicates the zoom level, and
the ``center`` indicates the position to put at the center of the
view.
Parameters
----------
fov : float
Field of view. Zero (default) means orthographic projection.
elevation : float
Elevation angle in degrees. Positive angles place the camera
        above the center point, negative angles place the camera below
the center point.
azimuth : float
Azimuth angle in degrees. Zero degrees places the camera on the
positive x-axis, pointing in the negative x direction.
roll : float
Roll angle in degrees
distance : float | None
The distance of the camera from the rotation point (only makes sense
if fov > 0). If None (default) the distance is determined from the
scale_factor and fov.
**kwargs : dict
Keyword arguments to pass to `BaseCamera`.
Notes
-----
Interaction:
* LMB: orbits the view around its center point.
* RMB or scroll: change scale_factor (i.e. zoom level)
* SHIFT + LMB: translate the center point
* SHIFT + RMB: change FOV
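    Examples
    --------
    A minimal usage sketch (assumes TurntableCamera has been imported and a
    working vispy backend is available)::
        from vispy import scene
        canvas = scene.SceneCanvas(keys='interactive')
        view = canvas.central_widget.add_view()
        view.camera = TurntableCamera(fov=45, elevation=30, azimuth=120)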
"""
_state_props = Base3DRotationCamera._state_props + ('elevation',
'azimuth', 'roll')
def __init__(self, fov=0.0, elevation=30.0, azimuth=30.0, roll=0.0,
distance=None, **kwargs):
super(TurntableCamera, self).__init__(fov=fov, **kwargs)
# Set camera attributes
self.azimuth = azimuth
self.elevation = elevation
self.roll = roll # interaction not implemented yet
self.distance = distance # None means auto-distance
@property
def elevation(self):
""" The angle of the camera in degrees above the horizontal (x, z)
plane.
"""
return self._elevation
@elevation.setter
def elevation(self, elev):
elev = float(elev)
self._elevation = min(90, max(-90, elev))
self.view_changed()
@property
def azimuth(self):
""" The angle of the camera in degrees around the y axis. An angle of
0 places the camera within the (y, z) plane.
"""
return self._azimuth
@azimuth.setter
def azimuth(self, azim):
azim = float(azim)
while azim < -180:
azim += 360
while azim > 180:
azim -= 360
self._azimuth = azim
self.view_changed()
@property
def roll(self):
""" The angle of the camera in degrees around the z axis. An angle of
        0 puts the camera upright.
"""
return self._roll
@roll.setter
def roll(self, roll):
roll = float(roll)
while roll < -180:
roll += 360
while roll > 180:
roll -= 360
self._roll = roll
self.view_changed()
def orbit(self, azim, elev):
""" Orbits the camera around the center position.
Parameters
----------
azim : float
Angle in degrees to rotate horizontally around the center point.
elev : float
Angle in degrees to rotate vertically around the center point.
"""
self.azimuth += azim
self.elevation = np.clip(self.elevation + elev, -90, 90)
self.view_changed()
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = self.azimuth, self.elevation
self.azimuth = self._event_value[0] - (p2 - p1)[0] * 0.5
self.elevation = self._event_value[1] + (p2 - p1)[1] * 0.5
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
up, forward, right = self._get_dim_vectors()
self.transform.rotate(self.elevation, -right)
self.transform.rotate(self.azimuth, up)
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz
| bsd-2-clause |
benesch/pip | pip/_vendor/requests/packages/urllib3/util/connection.py | 365 | 4744 | from __future__ import absolute_import
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
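    An illustrative call (host and port are placeholders)::
        sock = create_connection(('example.org', 80), timeout=5.0)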
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
# Using the value from allowed_gai_family() in the context of getaddrinfo lets
# us select whether to work with IPv4 DNS records, IPv6 records, or both.
# The original create_connection function always returns all records.
family = allowed_gai_family()
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
"""This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records."""
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6('::1')
| mit |
kevinlondon/httpie | httpie/output/formatters/xml.py | 51 | 1959 | from __future__ import absolute_import
import re
from xml.etree import ElementTree
from httpie.plugins import FormatterPlugin
DECLARATION_RE = re.compile(r'<\?xml[^\n]+?\?>', flags=re.I)
DOCTYPE_RE = re.compile(r'<!DOCTYPE[^\n]+?>', flags=re.I)
DEFAULT_INDENT = 4
def indent(elem, indent_text=' ' * DEFAULT_INDENT):
"""
In-place prettyprint formatter
C.f. http://effbot.org/zone/element-lib.htm#prettyprint
"""
def _indent(elem, level=0):
i = "\n" + level * indent_text
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + indent_text
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
return _indent(elem)
class XMLFormatter(FormatterPlugin):
# TODO: tests
def format_body(self, body, mime):
if 'xml' in mime:
# FIXME: orig NS names get forgotten during the conversion, etc.
try:
root = ElementTree.fromstring(body.encode('utf8'))
except ElementTree.ParseError:
# Ignore invalid XML errors (skips attempting to pretty print)
pass
else:
indent(root)
# Use the original declaration
declaration = DECLARATION_RE.match(body)
doctype = DOCTYPE_RE.match(body)
body = ElementTree.tostring(root, encoding='utf-8')\
.decode('utf8')
if doctype:
body = '%s\n%s' % (doctype.group(0), body)
if declaration:
body = '%s\n%s' % (declaration.group(0), body)
return body
| bsd-3-clause |
tmenjo/cinder-2015.1.1 | cinder/flow_utils.py | 6 | 2961 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from cinder import exception
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
class CinderTask(task.Task):
"""The root task class for all cinder tasks.
It automatically names the given task using the module and class that
implement the given task as the task name.
"""
def __init__(self, addons=None, **kwargs):
super(CinderTask, self).__init__(_make_task_name(self.__class__,
addons),
**kwargs)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
"""This is used to attach to taskflow engines while they are running.
    It exposes the actions happening inside a taskflow engine, which is useful
    to developers for debugging and to operators for monitoring and tracking
    of resource actions.
"""
    #: These exceptions are expected cases; don't include a traceback in the log when they occur.
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)
def __init__(self, engine,
task_listen_for=base.DEFAULT_LISTEN_FOR,
flow_listen_for=base.DEFAULT_LISTEN_FOR,
retry_listen_for=base.DEFAULT_LISTEN_FOR,
logger=LOG):
super(DynamicLogListener, self).__init__(
engine,
task_listen_for=task_listen_for,
flow_listen_for=flow_listen_for,
retry_listen_for=retry_listen_for,
log=logger)
def _format_failure(self, fail):
if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None:
exc_info = None
exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
return (exc_info, exc_details)
else:
return super(DynamicLogListener, self)._format_failure(fail)
| apache-2.0 |
Maistho/CouchPotatoServer | couchpotato/core/media/_base/media/main.py | 65 | 21493 | from datetime import timedelta
import time
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound, RecordDeleted
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
log = CPLog(__name__)
class MediaPlugin(MediaBase):
_database = {
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_tag': MediaTagIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
'media_children': MediaChildrenIndex,
}
def __init__(self):
addApiView('media.refresh', self.refresh, docs = {
'desc': 'Refresh a any media type by ID',
'params': {
'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'},
'search': {'desc': 'Search media title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any media returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView, priority = 100)
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('app.load', self.cleanupFaults)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
addEvent('media.with_identifiers', self.withIdentifiers)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
addEvent('media.tag', self.tag)
addEvent('media.untag', self.unTag)
# Wrongly tagged media files
def cleanupFaults(self):
medias = fireEvent('media.with_status', 'ignored', single = True) or []
db = get_db()
for media in medias:
try:
media['status'] = 'done'
db.update(media)
except:
pass
def refresh(self, id = '', **kwargs):
handlers = []
ids = splitString(id)
for x in ids:
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def createRefreshHandler(self, media_id):
try:
media = get_db().get('id', media_id)
event = '%s.update' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
try:
db = get_db()
imdb_id = getImdb(str(media_id))
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
except (RecordNotFound, RecordDeleted):
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def withStatus(self, status, types = None, with_doc = True):
db = get_db()
if types and not isinstance(types, (list, tuple)):
types = [types]
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s):
if with_doc:
try:
doc = db.get('id', ms['_id'])
if types and doc.get('type') not in types:
continue
yield doc
except (RecordDeleted, RecordNotFound):
log.debug('Record not found, skipping: %s', ms['_id'])
except (ValueError, EOFError):
fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
else:
yield ms
def withIdentifiers(self, identifiers, with_doc = False):
db = get_db()
for x in identifiers:
try:
return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
except:
pass
log.debug('No media found with identifiers: %s', identifiers)
return False
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
if with_tags and not isinstance(with_tags, (list, tuple)):
with_tags = [with_tags]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = list(all_media_ids)
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Add search filters
if starts_with:
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Add tag filter
if with_tags:
filter_by['with_tags'] = set()
for tag in with_tags:
for x in db.get_many('media_tag', tag):
filter_by['with_tags'].add(x['_id'])
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
del filter_by['media_status']
del filter_by['release_status']
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
total_count = len(media_ids)
if total_count == 0:
return 0, []
offset = 0
limit = -1
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
            offset = tryInt(0 if len(splt) == 1 else splt[1])
# List movies based on title order
medias = []
for m in db.all('media_title'):
media_id = m['_id']
if media_id not in media_ids: continue
if offset > 0:
offset -= 1
continue
media = fireEvent('media.get', media_id, single = True)
# Skip if no media has been found
if not media:
continue
# Merge releases with movie dict
medias.append(media)
# remove from media ids
media_ids.remove(media_id)
if len(media_ids) == 0 or len(medias) == limit: break
return total_count, medias
def listView(self, **kwargs):
total_movies, movies = self.list(
types = splitString(kwargs.get('type')),
status = splitString(kwargs.get('status')),
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
with_tags = splitString(kwargs.get('with_tags')),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
            tempList = (lambda mt: lambda *args, **kwargs : self.listView(type = mt, **kwargs))(media_type)  # bind media_type now, not at call time
addApiView('%s.list' % media_type, tempList, docs = {
'desc': 'List media',
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
'search': {'desc': 'Search ' + media_type + ' title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any """ + media_type + """s returned or not,
'media': array, media found,
}"""}
})
def availableChars(self, types = None, status = None, release_status = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = all_media_ids
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
chars = set()
for x in db.all('media_startswith'):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 27:
break
return list(chars)
def charView(self, **kwargs):
type = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(type, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
            tempChar = (lambda mt: lambda *args, **kwargs : self.charView(type = mt, **kwargs))(media_type)  # bind media_type now, not at call time
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
try:
db = get_db()
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
for release in media_releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.get('status') != 'done':
db.delete(release)
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done' or media.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
if deleted:
fireEvent('notify.frontend', type = 'media.deleted', data = media)
except:
log.error('Failed deleting media: %s', traceback.format_exc())
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
            tempDelete = (lambda mt: lambda *args, **kwargs : self.deleteView(type = mt, **kwargs))(media_type)  # bind media_type now, not at call time
addApiView('%s.delete' % media_type, tempDelete, docs = {
'desc': 'Delete a ' + media_type + ' from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
}
})
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
try:
db = get_db()
m = db.get('id', media_id)
previous_status = m['status']
log.debug('Changing status for %s', getTitle(m))
if not m['profile_id']:
m['status'] = 'done'
else:
m['status'] = 'active'
try:
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
done_releases = [release for release in media_releases if release.get('status') == 'done']
if done_releases:
# Check if we are finished with the media
for release in done_releases:
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
m['status'] = 'done'
break
elif previous_status == 'done':
m['status'] = 'done'
except RecordNotFound:
log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
m['status'] = previous_status
# Only update when status has changed
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
db.update(m)
# Tag media as recent
if tag_recent:
self.tag(media_id, 'recent', update_edited = True)
return m['status']
except:
log.error('Failed restatus: %s', traceback.format_exc())
def tag(self, media_id, tag, update_edited = False):
try:
db = get_db()
m = db.get('id', media_id)
if update_edited:
m['last_edit'] = int(time.time())
tags = m.get('tags') or []
if tag not in tags:
tags.append(tag)
m['tags'] = tags
db.update(m)
return True
except:
log.error('Failed tagging: %s', traceback.format_exc())
return False
def unTag(self, media_id, tag):
try:
db = get_db()
m = db.get('id', media_id)
tags = m.get('tags') or []
if tag in tags:
new_tags = list(set(tags))
new_tags.remove(tag)
m['tags'] = new_tags
db.update(m)
return True
except:
log.error('Failed untagging: %s', traceback.format_exc())
return False
| gpl-3.0 |
TeamEOS/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py | 658 | 4406 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi00
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
"""Performs WebSocket handshake.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
allowDraft75: obsolete argument. ignored.
strict: obsolete argument. ignored.
    The handshaker will add attributes such as ws_resource to the request
    while performing the handshake.
"""
_LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
# To print mimetools.Message as escaped one-line string, we converts
# headers_in to dict object. Without conversion, if we use %r, it just
# prints the type and address, and if we use %s, it prints the original
# header string as multiple lines.
#
# Both mimetools.Message and MpTable_Type of mod_python can be
# converted to dict.
#
# mimetools.Message.__str__ returns the original header string.
# dict(mimetools.Message object) returns the map from header names to
# header values. While MpTable_Type doesn't have such __str__ but just
# __repr__ which formats itself as well as dictionary object.
_LOGGER.debug(
'Client\'s opening handshake headers: %r', dict(request.headers_in))
handshakers = []
handshakers.append(
('RFC 6455', hybi.Handshaker(request, dispatcher)))
handshakers.append(
('HyBi 00', hybi00.Handshaker(request, dispatcher)))
for name, handshaker in handshakers:
_LOGGER.debug('Trying protocol version %s', name)
try:
handshaker.do_handshake()
_LOGGER.info('Established (%s protocol)', name)
return
except HandshakeException, e:
_LOGGER.debug(
'Failed to complete opening handshake as %s protocol: %r',
name, e)
if e.status:
raise e
except AbortedByUserException, e:
raise
except VersionException, e:
raise
# TODO(toyoshim): Add a test to cover the case all handshakers fail.
raise HandshakeException(
'Failed to complete opening handshake for all available protocols',
status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
| bsd-3-clause |
openstack/python-magnumclient | magnumclient/common/utils.py | 1 | 11677 | # -*- coding: utf-8 -*-
#
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509.oid import NameOID
from oslo_serialization import base64
from oslo_serialization import jsonutils
from magnumclient import exceptions as exc
from magnumclient.i18n import _
def common_filters(marker=None, limit=None, sort_key=None, sort_dir=None):
"""Generate common filters for any list request.
:param marker: entity ID from which to start returning entities.
:param limit: maximum number of entities to return.
:param sort_key: field to use for sorting.
:param sort_dir: direction of sorting: 'asc' or 'desc'.
:returns: list of string filters.
"""
filters = []
if isinstance(limit, int):
filters.append('limit=%s' % limit)
if marker is not None:
filters.append('marker=%s' % marker)
if sort_key is not None:
filters.append('sort_key=%s' % sort_key)
if sort_dir is not None:
filters.append('sort_dir=%s' % sort_dir)
return filters
def split_and_deserialize(string):
"""Split and try to JSON deserialize a string.
Gets a string with the KEY=VALUE format, split it (using '=' as the
separator) and try to JSON deserialize the VALUE.
:returns: A tuple of (key, value).
"""
try:
key, value = string.split("=", 1)
except ValueError:
raise exc.CommandError(_('Attributes must be a list of '
'PATH=VALUE not "%s"') % string)
try:
value = jsonutils.loads(value)
except ValueError:
pass
return (key, value)
def args_array_to_patch(op, attributes):
patch = []
for attr in attributes:
# Sanitize
if not attr.startswith('/'):
attr = '/' + attr
if op in ['add', 'replace']:
path, value = split_and_deserialize(attr)
if path == "/labels" or path == "/health_status_reason":
a = []
a.append(value)
value = str(handle_labels(a))
patch.append({'op': op, 'path': path, 'value': value})
else:
patch.append({'op': op, 'path': path, 'value': value})
elif op == "remove":
# For remove only the key is needed
patch.append({'op': op, 'path': attr})
else:
raise exc.CommandError(_('Unknown PATCH operation: %s') % op)
return patch
def handle_labels(labels):
labels = format_labels(labels)
if 'mesos_slave_executor_env_file' in labels:
environment_variables_data = handle_json_from_file(
labels['mesos_slave_executor_env_file'])
labels['mesos_slave_executor_env_variables'] = jsonutils.dumps(
environment_variables_data)
return labels
def format_labels(lbls, parse_comma=True):
'''Reformat labels into dict of format expected by the API.'''
if not lbls:
return {}
if parse_comma:
# expect multiple invocations of --labels but fall back
# to either , or ; delimited if only one --labels is specified
if len(lbls) == 1 and lbls[0].count('=') > 1:
lbls = lbls[0].replace(';', ',').split(',')
labels = {}
for lbl in lbls:
try:
(k, v) = lbl.split(('='), 1)
except ValueError:
raise exc.CommandError(_('labels must be a list of KEY=VALUE '
'not %s') % lbl)
if k not in labels:
labels[k] = v
else:
labels[k] += ",%s" % v
return labels
def print_list_field(field):
return lambda obj: ', '.join(getattr(obj, field))
def handle_json_from_file(json_arg):
"""Attempts to read JSON file by the file url.
:param json_arg: May be a file name containing the JSON.
:returns: A list or dictionary parsed from JSON.
"""
try:
with open(json_arg, 'r') as f:
json_arg = f.read().strip()
json_arg = jsonutils.loads(json_arg)
except IOError as e:
err = _("Cannot get JSON from file '%(file)s'. "
"Error: %(err)s") % {'err': e, 'file': json_arg}
raise exc.InvalidAttribute(err)
except ValueError as e:
err = (_("For JSON: '%(string)s', error: '%(err)s'") %
{'err': e, 'string': json_arg})
raise exc.InvalidAttribute(err)
return json_arg
def config_cluster(cluster, cluster_template, cfg_dir, force=False,
certs=None, use_keystone=False):
"""Return and write configuration for the given cluster."""
if cluster_template.coe == 'kubernetes':
return _config_cluster_kubernetes(cluster, cluster_template, cfg_dir,
force, certs, use_keystone)
elif (cluster_template.coe == 'swarm'
or cluster_template.coe == 'swarm-mode'):
return _config_cluster_swarm(cluster, cluster_template, cfg_dir,
force, certs)
def _config_cluster_kubernetes(cluster, cluster_template, cfg_dir,
force=False, certs=None, use_keystone=False):
"""Return and write configuration for the given kubernetes cluster."""
cfg_file = "%s/config" % cfg_dir
if cluster_template.tls_disabled or certs is None:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: %(name)s\n"
" name: %(name)s\n"
"current-context: %(name)s\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: %(name)s'\n"
% {'name': cluster.name, 'api_address': cluster.api_address})
else:
if not use_keystone:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" certificate-authority-data: %(ca)s\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: admin\n"
" name: default\n"
"current-context: default\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: admin\n"
" user:\n"
" client-certificate-data: %(cert)s\n"
" client-key-data: %(key)s\n"
% {'name': cluster.name,
'api_address': cluster.api_address,
'key': base64.encode_as_text(certs['key']),
'cert': base64.encode_as_text(certs['cert']),
'ca': base64.encode_as_text(certs['ca'])})
else:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" certificate-authority-data: %(ca)s\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: openstackuser\n"
" name: openstackuser@kubernetes\n"
"current-context: openstackuser@kubernetes\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: openstackuser\n"
" user:\n"
" exec:\n"
" command: /bin/bash\n"
" apiVersion: client.authentication.k8s.io/v1alpha1\n"
" args:\n"
" - -c\n"
" - >\n"
" if [ -z ${OS_TOKEN} ]; then\n"
" echo 'Error: Missing OpenStack credential from environment variable $OS_TOKEN' > /dev/stderr\n" # noqa
" exit 1\n"
" else\n"
" echo '{ \"apiVersion\": \"client.authentication.k8s.io/v1alpha1\", \"kind\": \"ExecCredential\", \"status\": { \"token\": \"'\"${OS_TOKEN}\"'\"}}'\n" # noqa
" fi\n"
% {'name': cluster.name,
'api_address': cluster.api_address,
'ca': base64.encode_as_text(certs['ca'])})
if os.path.exists(cfg_file) and not force:
raise exc.CommandError("File %s exists, aborting." % cfg_file)
else:
f = open(cfg_file, "w")
f.write(cfg)
f.close()
if 'csh' in os.environ['SHELL']:
return "setenv KUBECONFIG %s\n" % cfg_file
else:
return "export KUBECONFIG=%s\n" % cfg_file
def _config_cluster_swarm(cluster, cluster_template, cfg_dir,
force=False, certs=None):
"""Return and write configuration for the given swarm cluster."""
tls = "" if cluster_template.tls_disabled else True
if 'csh' in os.environ['SHELL']:
result = ("setenv DOCKER_HOST %(docker_host)s\n"
"setenv DOCKER_CERT_PATH %(cfg_dir)s\n"
"setenv DOCKER_TLS_VERIFY %(tls)s\n"
% {'docker_host': cluster.api_address,
'cfg_dir': cfg_dir,
'tls': tls}
)
else:
result = ("export DOCKER_HOST=%(docker_host)s\n"
"export DOCKER_CERT_PATH=%(cfg_dir)s\n"
"export DOCKER_TLS_VERIFY=%(tls)s\n"
% {'docker_host': cluster.api_address,
'cfg_dir': cfg_dir,
'tls': tls}
)
return result
def generate_csr_and_key():
"""Return a dict with a new csr and key."""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
csr = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"admin"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"system:masters")
])).sign(key, hashes.SHA256(), default_backend())
result = {
'csr': csr.public_bytes(
encoding=serialization.Encoding.PEM).decode("utf-8"),
'key': key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()).decode("utf-8"),
}
return result
| apache-2.0 |
bgxavier/nova | nova/objects/compute_node.py | 8 | 15378 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added get_by_service_id()
# Version 1.2: String attributes updated to support unicode
# Version 1.3: Added stats field
# Version 1.4: Added host ip field
# Version 1.5: Added numa_topology field
# Version 1.6: Added supported_hv_specs
# Version 1.7: Added host field
# Version 1.8: Added get_by_host_and_nodename()
# Version 1.9: Added pci_device_pools
# Version 1.10: Added get_first_node_by_host_for_old_compat()
# Version 1.11: PciDevicePoolList version 1.1
VERSION = '1.11'
fields = {
'id': fields.IntegerField(read_only=True),
'service_id': fields.IntegerField(),
'host': fields.StringField(nullable=True),
'vcpus': fields.IntegerField(),
'memory_mb': fields.IntegerField(),
'local_gb': fields.IntegerField(),
'vcpus_used': fields.IntegerField(),
'memory_mb_used': fields.IntegerField(),
'local_gb_used': fields.IntegerField(),
'hypervisor_type': fields.StringField(),
'hypervisor_version': fields.IntegerField(),
'hypervisor_hostname': fields.StringField(nullable=True),
'free_ram_mb': fields.IntegerField(nullable=True),
'free_disk_gb': fields.IntegerField(nullable=True),
'current_workload': fields.IntegerField(nullable=True),
'running_vms': fields.IntegerField(nullable=True),
'cpu_info': fields.StringField(nullable=True),
'disk_available_least': fields.IntegerField(nullable=True),
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
'host_ip': fields.IPAddressField(nullable=True),
'numa_topology': fields.StringField(nullable=True),
# NOTE(pmurray): the supported_hv_specs field maps to the
# supported_instances field in the database
'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
# NOTE(pmurray): the pci_device_pools field maps to the
# pci_stats field in the database
'pci_device_pools': fields.ObjectField('PciDevicePoolList',
nullable=True),
}
obj_relationships = {
'pci_device_pools': [('1.9', '1.0'), ('1.11', '1.1')],
'supported_hv_specs': [('1.6', '1.0')],
}
def obj_make_compatible(self, primitive, target_version):
super(ComputeNode, self).obj_make_compatible(primitive, target_version)
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 7) and 'host' in primitive:
del primitive['host']
if target_version < (1, 5) and 'numa_topology' in primitive:
del primitive['numa_topology']
if target_version < (1, 4) and 'host_ip' in primitive:
del primitive['host_ip']
if target_version < (1, 3) and 'stats' in primitive:
# pre 1.3 version does not have a stats field
del primitive['stats']
@staticmethod
def _host_from_db_object(compute, db_compute):
if (('host' not in db_compute or db_compute['host'] is None)
and 'service_id' in db_compute
and db_compute['service_id'] is not None):
# FIXME(sbauza) : Unconverted compute record, provide compatibility
# This has to stay until we can be sure that any/all compute nodes
# in the database have been converted to use the host field
# Service field of ComputeNode could be deprecated in a next patch,
# so let's use directly the Service object
try:
service = objects.Service.get_by_id(
compute._context, db_compute['service_id'])
except exception.ServiceNotFound:
compute['host'] = None
return
try:
compute['host'] = service.host
except (AttributeError, exception.OrphanedObjectError):
# Host can be nullable in Service
compute['host'] = None
elif 'host' in db_compute and db_compute['host'] is not None:
# New-style DB having host as a field
compute['host'] = db_compute['host']
else:
# We assume it should not happen but in case, let's set it to None
compute['host'] = None
@staticmethod
def _from_db_object(context, compute, db_compute):
special_cases = set([
'stats',
'supported_hv_specs',
'host',
'pci_device_pools',
])
fields = set(compute.fields) - special_cases
for key in fields:
compute[key] = db_compute[key]
stats = db_compute['stats']
if stats:
compute['stats'] = jsonutils.loads(stats)
sup_insts = db_compute.get('supported_instances')
if sup_insts:
hv_specs = jsonutils.loads(sup_insts)
hv_specs = [objects.HVSpec.from_list(hv_spec)
for hv_spec in hv_specs]
compute['supported_hv_specs'] = hv_specs
pci_stats = db_compute.get('pci_stats')
compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
compute._context = context
# Make sure that we correctly set the host field depending on either
# host column is present in the table or not
compute._host_from_db_object(compute, db_compute)
compute.obj_reset_changes()
return compute
@base.remotable_classmethod
def get_by_id(cls, context, compute_id):
db_compute = db.compute_node_get(context, compute_id)
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_by_service_id(cls, context, service_id):
db_computes = db.compute_nodes_get_by_service_id(context, service_id)
# NOTE(sbauza): Old version was returning an item, we need to keep this
# behaviour for backwards compatibility
db_compute = db_computes[0]
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_by_host_and_nodename(cls, context, host, nodename):
try:
db_compute = db.compute_node_get_by_host_and_nodename(
context, host, nodename)
except exception.ComputeHostNotFound:
# FIXME(sbauza): Some old computes can still have no host record
# We need to provide compatibility by using the old service_id
# record.
# We assume the compatibility as an extra penalty of one more DB
# call but that's necessary until all nodes are upgraded.
try:
service = objects.Service.get_by_compute_host(context, host)
db_computes = db.compute_nodes_get_by_service_id(
context, service.id)
except exception.ServiceNotFound:
# We need to provide the same exception upstream
raise exception.ComputeHostNotFound(host=host)
db_compute = None
for compute in db_computes:
if compute['hypervisor_hostname'] == nodename:
db_compute = compute
# We can avoid an extra call to Service object in
# _from_db_object
db_compute['host'] = service.host
break
if not db_compute:
raise exception.ComputeHostNotFound(host=host)
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_first_node_by_host_for_old_compat(cls, context, host,
use_slave=False):
computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
# FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
# nodes per host, we should return all the nodes and modify the callers
# instead.
# Arbitrarily returning the first node.
return computes[0]
@staticmethod
def _convert_stats_to_db_format(updates):
stats = updates.pop('stats', None)
if stats is not None:
updates['stats'] = jsonutils.dumps(stats)
@staticmethod
def _convert_host_ip_to_db_format(updates):
host_ip = updates.pop('host_ip', None)
if host_ip:
updates['host_ip'] = str(host_ip)
@staticmethod
def _convert_supported_instances_to_db_format(updates):
hv_specs = updates.pop('supported_hv_specs', None)
if hv_specs is not None:
hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
updates['supported_instances'] = jsonutils.dumps(hv_specs)
@staticmethod
def _convert_pci_stats_to_db_format(updates):
pools = updates.pop('pci_device_pools', None)
if pools:
updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_create(self._context, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def save(self, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, no longer relevant
updates = self.obj_get_changes()
updates.pop('id', None)
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def destroy(self):
db.compute_node_delete(self._context, self.id)
@property
def service(self):
if not hasattr(self, '_cached_service'):
self._cached_service = objects.Service.get_by_id(self._context,
self.service_id)
return self._cached_service
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# ComputeNode <= version 1.2
# Version 1.1 ComputeNode version 1.3
# Version 1.2 Add get_by_service()
# Version 1.3 ComputeNode version 1.4
# Version 1.4 ComputeNode version 1.5
# Version 1.5 Add use_slave to get_by_service
# Version 1.6 ComputeNode version 1.6
# Version 1.7 ComputeNode version 1.7
# Version 1.8 ComputeNode version 1.8 + add get_all_by_host()
# Version 1.9 ComputeNode version 1.9
# Version 1.10 ComputeNode version 1.10
# Version 1.11 ComputeNode version 1.11
VERSION = '1.11'
fields = {
'objects': fields.ListOfObjectsField('ComputeNode'),
}
child_versions = {
'1.0': '1.2',
# NOTE(danms): ComputeNode was at 1.2 before we added this
'1.1': '1.3',
'1.2': '1.3',
'1.3': '1.4',
'1.4': '1.5',
'1.5': '1.5',
'1.6': '1.6',
'1.7': '1.7',
'1.8': '1.8',
'1.9': '1.9',
'1.10': '1.10',
'1.11': '1.11',
}
@base.remotable_classmethod
def get_all(cls, context):
db_computes = db.compute_node_get_all(context)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@base.remotable_classmethod
def get_by_hypervisor(cls, context, hypervisor_match):
db_computes = db.compute_node_search_by_hypervisor(context,
hypervisor_match)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@base.remotable_classmethod
def _get_by_service(cls, context, service_id, use_slave=False):
try:
db_computes = db.compute_nodes_get_by_service_id(
context, service_id)
except exception.ServiceNotFound:
# NOTE(sbauza): Previous behaviour was returning an empty list
# if the service was created with no computes, we need to keep it.
db_computes = []
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@classmethod
def get_by_service(cls, context, service, use_slave=False):
return cls._get_by_service(context, service.id, use_slave=use_slave)
@base.remotable_classmethod
def get_all_by_host(cls, context, host, use_slave=False):
try:
db_computes = db.compute_node_get_all_by_host(context, host,
use_slave)
except exception.ComputeHostNotFound:
# FIXME(sbauza): Some old computes can still have no host record
# We need to provide compatibility by using the old service_id
# record.
# We assume the compatibility as an extra penalty of one more DB
# call but that's necessary until all nodes are upgraded.
try:
service = objects.Service.get_by_compute_host(context, host,
use_slave)
db_computes = db.compute_nodes_get_by_service_id(
context, service.id)
except exception.ServiceNotFound:
# We need to provide the same exception upstream
raise exception.ComputeHostNotFound(host=host)
# We can avoid an extra call to Service object in _from_db_object
for db_compute in db_computes:
db_compute['host'] = service.host
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
| apache-2.0 |
ZacariasBendeck/youtube-dl | youtube_dl/extractor/hentaistigma.py | 109 | 1201 | from __future__ import unicode_literals
from .common import InfoExtractor
class HentaiStigmaIE(InfoExtractor):
_VALID_URL = r'^https?://hentai\.animestigma\.com/(?P<id>[^/]+)'
_TEST = {
'url': 'http://hentai.animestigma.com/inyouchuu-etsu-bonus/',
'md5': '4e3d07422a68a4cc363d8f57c8bf0d23',
'info_dict': {
'id': 'inyouchuu-etsu-bonus',
'ext': 'mp4',
"title": "Inyouchuu Etsu Bonus",
"age_limit": 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h2[^>]+class="posttitle"[^>]*><a[^>]*>([^<]+)</a>',
webpage, 'title')
wrap_url = self._html_search_regex(
r'<iframe[^>]+src="([^"]+mp4)"', webpage, 'wrapper url')
wrap_webpage = self._download_webpage(wrap_url, video_id)
video_url = self._html_search_regex(
r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url')
return {
'id': video_id,
'url': video_url,
'title': title,
'age_limit': 18,
}
| unlicense |
SauloAislan/ironic | ironic/tests/unit/drivers/test_utils.py | 3 | 16883 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import fake
from ironic.drivers import utils as driver_utils
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class UtilsTestCase(db_base.DbTestCase):
def setUp(self):
super(UtilsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
self.node = obj_utils.create_test_node(self.context)
def test_vendor_interface_get_properties(self):
expected = {'A1': 'A1 description. Required.',
'A2': 'A2 description. Optional.',
'B1': 'B1 description. Required.',
'B2': 'B2 description. Required.'}
props = self.driver.vendor.get_properties()
self.assertEqual(expected, props)
@mock.patch.object(fake.FakeVendorA, 'validate', autospec=True)
def test_vendor_interface_validate_valid_methods(self,
mock_fakea_validate):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.vendor.validate(task, method='first_method')
mock_fakea_validate.assert_called_once_with(
self.driver.vendor.mapping['first_method'],
task, method='first_method')
def test_vendor_interface_validate_bad_method(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='fake_method')
def test_get_node_mac_addresses(self):
ports = []
ports.append(
obj_utils.create_test_port(
self.context,
address='aa:bb:cc:dd:ee:ff',
uuid='bb43dc0b-03f2-4d2e-ae87-c02d7f33cc53',
node_id=self.node.id)
)
ports.append(
obj_utils.create_test_port(
self.context,
address='dd:ee:ff:aa:bb:cc',
uuid='4fc26c0b-03f2-4d2e-ae87-c02d7f33c234',
node_id=self.node.id)
)
with task_manager.acquire(self.context, self.node.uuid) as task:
node_macs = driver_utils.get_node_mac_addresses(task)
self.assertEqual(sorted([p.address for p in ports]), sorted(node_macs))
def test_get_node_capability(self):
properties = {'capabilities': 'cap1:value1, cap2: value2'}
self.node.properties = properties
expected = 'value1'
expected2 = 'value2'
result = driver_utils.get_node_capability(self.node, 'cap1')
result2 = driver_utils.get_node_capability(self.node, 'cap2')
self.assertEqual(expected, result)
self.assertEqual(expected2, result2)
def test_get_node_capability_returns_none(self):
properties = {'capabilities': 'cap1:value1,cap2:value2'}
self.node.properties = properties
result = driver_utils.get_node_capability(self.node, 'capX')
self.assertIsNone(result)
def test_add_node_capability(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = ''
driver_utils.add_node_capability(task, 'boot_mode', 'bios')
self.assertEqual('boot_mode:bios',
task.node.properties['capabilities'])
def test_add_node_capability_append(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'a:b,c:d'
driver_utils.add_node_capability(task, 'boot_mode', 'bios')
self.assertEqual('a:b,c:d,boot_mode:bios',
task.node.properties['capabilities'])
def test_add_node_capability_append_duplicate(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'a:b,c:d'
driver_utils.add_node_capability(task, 'a', 'b')
self.assertEqual('a:b,c:d,a:b',
task.node.properties['capabilities'])
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
def test_ensure_next_boot_device(self, node_set_boot_device_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['persistent_boot_device'] = 'pxe'
driver_utils.ensure_next_boot_device(
task,
{'force_boot_device': True}
)
node_set_boot_device_mock.assert_called_once_with(task, 'pxe')
def test_ensure_next_boot_device_clears_is_next_boot_persistent(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['persistent_boot_device'] = 'pxe'
task.node.driver_internal_info['is_next_boot_persistent'] = False
driver_utils.ensure_next_boot_device(
task,
{'force_boot_device': True}
)
task.node.refresh()
self.assertNotIn('is_next_boot_persistent',
task.node.driver_internal_info)
def test_force_persistent_boot_true(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ipmi_force_boot_device'] = True
ret = driver_utils.force_persistent_boot(task, 'pxe', True)
self.assertIsNone(ret)
task.node.refresh()
self.assertIn(('persistent_boot_device', 'pxe'),
task.node.driver_internal_info.items())
self.assertNotIn('is_next_boot_persistent',
task.node.driver_internal_info)
def test_force_persistent_boot_false(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret = driver_utils.force_persistent_boot(task, 'pxe', False)
self.assertIsNone(ret)
task.node.refresh()
self.assertEqual(
False,
task.node.driver_internal_info['is_next_boot_persistent'])
def test_capabilities_to_dict(self):
capabilities_more_than_one_item = 'a:b,c:d'
capabilities_exactly_one_item = 'e:f'
# Testing empty capabilities
self.assertEqual(
{},
driver_utils.capabilities_to_dict('')
)
self.assertEqual(
{'e': 'f'},
driver_utils.capabilities_to_dict(capabilities_exactly_one_item)
)
self.assertEqual(
{'a': 'b', 'c': 'd'},
driver_utils.capabilities_to_dict(capabilities_more_than_one_item)
)
def test_capabilities_to_dict_with_only_key_or_value_fail(self):
capabilities_only_key_or_value = 'xpto'
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_only_key_or_value
)
self.assertEqual('Malformed capabilities value: xpto', str(exc))
def test_capabilities_to_dict_with_invalid_character_fail(self):
for test_capabilities in ('xpto:a,', ',xpto:a'):
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
test_capabilities
)
self.assertEqual('Malformed capabilities value: ', str(exc))
def test_capabilities_to_dict_with_incorrect_format_fail(self):
for test_capabilities in (':xpto,', 'xpto:,', ':,'):
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
test_capabilities
)
self.assertEqual('Malformed capabilities value: ', str(exc))
def test_capabilities_not_string(self):
capabilities_already_dict = {'a': 'b'}
capabilities_something_else = 42
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_already_dict
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(dict), str(exc))
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_something_else
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(int), str(exc))
def test_normalize_mac_string(self):
mac_raw = "0A:1B-2C-3D:4F"
mac_clean = driver_utils.normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
def test_normalize_mac_unicode(self):
mac_raw = u"0A:1B-2C-3D:4F"
mac_clean = driver_utils.normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
class UtilsRamdiskLogsTestCase(tests_base.TestCase):
def setUp(self):
super(UtilsRamdiskLogsTestCase, self).setUp()
self.node = obj_utils.get_test_node(self.context)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_ramdisk_logs_file_name(self, mock_utcnow):
mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0)
name = driver_utils.get_ramdisk_logs_file_name(self.node)
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_'
'2000-01-01-00:00:00.tar.gz')
self.assertEqual(expected_name, name)
# with instance_info
instance_uuid = '7a5641ba-d264-424a-a9d7-e2a293ca482b'
node2 = obj_utils.get_test_node(
self.context, instance_uuid=instance_uuid)
name = driver_utils.get_ramdisk_logs_file_name(node2)
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_' +
instance_uuid + '_2000-01-01-00:00:00.tar.gz')
self.assertEqual(expected_name, name)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs(self, mock_collect, mock_store):
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
mock_store.assert_called_once_with(self.node, logs)
@mock.patch.object(driver_utils.LOG, 'error', autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs_IPA_command_fail(
self, mock_collect, mock_store, mock_log):
error_str = 'MR. KRABS! I WANNA GO TO BED!'
mock_collect.return_value = {'faultstring': error_str}
driver_utils.collect_ramdisk_logs(self.node)
# assert store was never invoked
self.assertFalse(mock_store.called)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error_str})
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def test_collect_ramdisk_logs_storage_command_fail(
self, mock_collect, mock_store):
mock_collect.side_effect = exception.IronicException('boom')
self.assertIsNone(driver_utils.collect_ramdisk_logs(self.node))
self.assertFalse(mock_store.called)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@mock.patch.object(agent_client.AgentClient,
'collect_system_logs', autospec=True)
def _collect_ramdisk_logs_storage_fail(
self, expected_exception, mock_collect, mock_store):
mock_store.side_effect = expected_exception
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
mock_store.assert_called_once_with(self.node, logs)
@mock.patch.object(driver_utils.LOG, 'exception', autospec=True)
def test_collect_ramdisk_logs_storage_fail_fs(self, mock_log):
error = IOError('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('file-system', mock_log.call_args[0][0])
@mock.patch.object(driver_utils.LOG, 'error', autospec=True)
def test_collect_ramdisk_logs_storage_fail_swift(self, mock_log):
error = exception.SwiftOperationError('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('Swift', mock_log.call_args[0][0])
@mock.patch.object(driver_utils.LOG, 'exception', autospec=True)
    def test_collect_ramdisk_logs_storage_fail_unknown(self, mock_log):
error = Exception('boom')
self._collect_ramdisk_logs_storage_fail(error)
mock_log.assert_called_once_with(
mock.ANY, {'node': self.node.uuid, 'error': error})
self.assertIn('Unknown error', mock_log.call_args[0][0])
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
@mock.patch.object(driver_utils,
'get_ramdisk_logs_file_name', autospec=True)
def test_store_ramdisk_logs_swift(self, mock_logs_name, mock_swift):
container_name = 'ironic_test_container'
file_name = 'ironic_test_file.tar.gz'
b64str = 'ZW5jb2RlZHN0cmluZw==\n'
cfg.CONF.set_override('deploy_logs_storage_backend', 'swift', 'agent')
cfg.CONF.set_override(
'deploy_logs_swift_container', container_name, 'agent')
cfg.CONF.set_override('deploy_logs_swift_days_to_expire', 1, 'agent')
mock_logs_name.return_value = file_name
driver_utils.store_ramdisk_logs(self.node, b64str)
mock_swift.return_value.create_object.assert_called_once_with(
container_name, file_name, mock.ANY,
object_headers={'X-Delete-After': '86400'})
mock_logs_name.assert_called_once_with(self.node)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(driver_utils,
'get_ramdisk_logs_file_name', autospec=True)
def test_store_ramdisk_logs_local(self, mock_logs_name, mock_makedirs):
file_name = 'ironic_test_file.tar.gz'
b64str = 'ZW5jb2RlZHN0cmluZw==\n'
log_path = '/foo/bar'
cfg.CONF.set_override('deploy_logs_local_path', log_path, 'agent')
mock_logs_name.return_value = file_name
with mock.patch.object(driver_utils, 'open', new=mock.mock_open(),
create=True) as mock_open:
driver_utils.store_ramdisk_logs(self.node, b64str)
expected_path = os.path.join(log_path, file_name)
mock_open.assert_called_once_with(expected_path, 'wb')
mock_makedirs.assert_called_once_with(log_path)
mock_logs_name.assert_called_once_with(self.node)
| apache-2.0 |
Arcanemagus/plexpy | lib/dns/rdtypes/ANY/NSEC.py | 18 | 4690 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
from dns._compat import xrange
class NSEC(dns.rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns.name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
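    # Encoding note, grounded in from_text()/to_text() below: for an rdata
    # type T the window is T // 256, and inside that window's bitmap byte
    # (T % 256) // 8 has bit 0x80 >> (T % 8) set.  For example type A (1)
    # lands in window 0, byte 0, encoded as 0x40.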
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 +
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, bitmap[0:octets]))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
windows.append((window, bitmap[0:octets]))
return cls(rdclass, rdtype, next, windows)
def to_wire(self, file, compress=None, origin=None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(struct.pack('!BB', window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC too short")
window = wire[current]
octets = wire[current + 1]
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC bitmap length")
bitmap = bytearray(wire[current: current + octets].unwrap())
current += octets
rdlen -= octets
windows.append((window, bitmap))
if origin is not None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
def choose_relativity(self, origin=None, relativize=True):
self.next = self.next.choose_relativity(origin, relativize)
| gpl-3.0 |
PyGotham/pygotham | pygotham/frontend/profile.py | 2 | 2262 | """PyGotham user profiles."""
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from flask_login import current_user
from flask_security import login_required
from pygotham.core import db
from pygotham.frontend import route
from pygotham.models import Talk, Volunteer
__all__ = ('blueprint',)
blueprint = Blueprint(
'profile',
__name__,
subdomain='<event_slug>',
url_prefix='/profile',
)
@route(blueprint, '/dashboard/')
@login_required
def dashboard():
"""Return the user's dashboard."""
# TODO: Optionally, old proposals should be shown in a read-only mode.
talks = Talk.query.current.filter(Talk.user == current_user)
return render_template(
'profile/dashboard.html', talks=talks)
@route(blueprint, '/settings/', methods=('GET', 'POST'))
@login_required
def settings():
"""Return the user's settings."""
# TODO: How should this be handled? Should a speaker's bio be stored
# as a snapshot from event to event? It could be stored as part of a
# talks.models.Presentation.
from pygotham.forms import ProfileForm
form = ProfileForm(request.form, obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
db.session.commit()
flash('Your profile has been updated.', 'success')
return redirect(url_for('profile.settings'))
return render_template('profile/settings.html', form=form)
@route(blueprint, '/unvolunteer/')
@login_required
def unvolunteer():
"""Remove a user from being a volunteer."""
if current_user.is_volunteer:
volunteer = Volunteer.query.current.filter(
Volunteer.user == current_user).first()
db.session.delete(volunteer)
db.session.commit()
flash("We're sorry to see you change your mind!")
return redirect(url_for('profile.dashboard'))
@route(blueprint, '/volunteer/')
@login_required
def volunteer():
"""Sign up a user as a volunteer."""
if not current_user.is_volunteer:
volunteer = Volunteer(user=current_user, event=g.current_event)
db.session.add(volunteer)
db.session.commit()
flash('Thanks for volunteering!')
return redirect(url_for('profile.dashboard'))
| bsd-3-clause |
wcalvert/LPC11U_LPC13U_CodeBase | src/drivers/sensors/testscripts/plot_xyz_plus_mag_sma.py | 2 | 3774 | #-------------------------------------------------------------------------------
# Name: plot_sensors_event.py
# Purpose: Plots logged sensors_event_t data from logger.c CSV files
#
# Author: K. Townsend
#
# Created: 09/06/2013
# Copyright: (c) K. Townsend 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import math
import numpy as np
import matplotlib.pyplot as plt
import Tkinter, tkFileDialog
from collections import deque
# This program will plot X/Y/Z data logged via drivers/storage/logger.c, and
# assumes we are getting vector data in CSV format generated using the
# 'sensorsLogSensorsEvent' helper function in drivers/sensors/sensors.c
#
# Data should look similar to this:
#
# 0,1,5714,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5729,6.001670,-6.629296,-4.785645,0.000000
# 0,1,5734,5.883990,-6.590069,-4.746419,0.000000
#
# In addition to the raw X/Y/Z data, vector magnitude is also calculated in
# a fourth data column
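# For the first sample row above, the magnitude computed by this script is
# sqrt(6.001670^2 + (-6.629296)^2 + (-4.785645)^2), roughly 10.14.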
class RingBuffer(deque):
def __init__(self, size_max):
deque.__init__(self)
self.size_max = size_max
def append(self, datum):
deque.append(self, datum)
if len(self) > self.size_max:
            self.popleft()
def tolist(self):
return list(self)
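# Small usage sketch (illustrative only): with size_max=3 the oldest value is
# dropped once a fourth one arrives, which is what bounds the moving-average
# window used in main() below.
def _ringbuffer_demo():
    rb = RingBuffer(size_max=3)
    for value in (1.0, 2.0, 3.0, 4.0):
        rb.append(value)
    return rb.tolist()  # [2.0, 3.0, 4.0]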
def main():
# Variables for our moving average filter
current = 0
avg = 0
total = 0
mavals = []
# Get window size (how many 'samples' are averaged together)
    windowsize = int(input("Window size (0..65535): "))
if (windowsize > 65535):
print ('Setting window size to 65535')
windowsize = 65535
if (windowsize < 1):
print ('Setting window size to 1')
windowsize = 1
# Request the data file to process
root = Tkinter.Tk()
root.withdraw()
filename = tkFileDialog.askopenfilename()
# Load the CSV file in 'data'
data = np.genfromtxt(filename,
delimiter=',',
dtype="i32,i32,i32,f32,f32,f32,f32",
names=['id','type','timestamp','x','y','z','a'])
# Create a circular buffer for our moving average filter
window = RingBuffer(size_max=windowsize)
# Calculate magnitude in column a
for x in np.nditer(data, op_flags=['readwrite']):
x['a'] = math.sqrt(
math.pow(x['x'], 2) +
math.pow(x['y'], 2) +
math.pow(x['z'], 2))
# Perform the moving average filter operations
current+=1
# Add magnitude into the ringbuffer
window.append(x['a'])
        # Make sure we've reached 'windowsize' samples in the buffer
if (current <= windowsize):
mavals.append(0)
else:
# Get the current average based on the window content
li = window.tolist()
total = 0
for i in li:
total += i
avg = (float)(total/windowsize)
# Append ma output for plotting below
mavals.append(avg);
# Display the results
plt.title("SMA Filtered sensors_event_t Data (X/Y/Z + Magnitude)\nSMA Window Size = %d Samples"
% (windowsize))
plt.xlabel('Timestamp (ms)')
plt.ylabel('Value')
plt.xlim(data['timestamp'].min(), data['timestamp'].max()*1.1)
plt.grid(True)
plt.plot(data['timestamp'], data['x'], color='r', alpha = 0.25, label='x')
plt.plot(data['timestamp'], data['y'], color='g', alpha = 0.25, label='y')
plt.plot(data['timestamp'], data['z'], color='b', alpha = 0.25, label='z')
plt.plot(data['timestamp'], data['a'], color='m', alpha = 0.25, label='mag')
plt.plot(data['timestamp'], mavals, color="black", label="mag filtered")
plt.legend()
plt.show()
pass
if __name__ == '__main__':
main()
| bsd-3-clause |
Openergy/oplus | setup.py | 1 | 1042 | from setuptools import setup, find_packages
from pkg_resources import parse_requirements
import os
with open(os.path.join("oplus", "version.py")) as f:
version = f.read().split("=")[1].strip().strip("'").strip('"')
with open("requirements.txt", "r") as f:
requirements = [str(r) for r in parse_requirements(f.read())]
setup(
name='oplus',
version=version,
packages=find_packages(),
author="Openergy development team",
author_email="[email protected]",
long_description=open('README.md').read(),
install_requires=requirements,
url='https://github.com/openergy/oplus',
classifiers=[
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Natural Language :: French",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Physics",
],
package_data={'oplus': ['*.txt']},
include_package_data=True
)
| mpl-2.0 |
lude-ma/python-ivi | ivi/agilent/agilentMSO6052A.py | 7 | 1687 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent6000 import *
class agilentMSO6052A(agilent6000):
"Agilent InfiniiVision MSO6052A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO6052A')
super(agilentMSO6052A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
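# Typical python-ivi usage for this driver (the resource string below is an
# assumed example, not taken from this file):
#   import ivi
#   scope = ivi.agilent.agilentMSO6052A("TCPIP0::192.168.1.100::INSTR")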
| mit |
Alluxio/alluxio | integration/vagrant/bin/spot_request.py | 6 | 7965 | #!/usr/bin/env python
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
# -*- coding: utf-8 -*-
"""
Submit or Cancel spot instance requests.
When submitting, the process will block until the requests are fulfilled
or the process is killed by the user (e.g. CTRL + C);
if the process is killed, the requests will be automatically canceled.
"""
import os
import time
import pickle
import argparse
import subprocess
import yaml
import boto.ec2.blockdevicemapping as bdm
from util import mkdir_p, info, warn, error
from init_aws import get_conn, get_ec2_conf
def get_bdm(ec2_conf):
def device(d):
dev = bdm.BlockDeviceType()
if d['VirtualName'].startswith('ephemeral'):
# Instance Storage
dev.ephemeral_name = d['VirtualName']
else:
# EBS
dev.size = d['Ebs.VolumeSize']
delete = d.get('Ebs.DeleteOnTermination', None)
if delete is not None:
dev.delete_on_termination = delete
return (d['DeviceName'], dev)
devices = map(device, ec2_conf['Block_Device_Mapping'])
device_mapping = bdm.BlockDeviceMapping()
for name, dev in devices:
device_mapping[name] = dev
return device_mapping
def get_init_conf():
return yaml.load(open('conf/init.yml'))
class RequestFailedError(Exception): pass
def all_fulfilled(requests):
fulfilled = True
for r in requests:
if r.status.code != 'fulfilled':
fulfilled = False
if r.state == 'failed':
raise RequestFailedError(r.status.message)
if not fulfilled:
break
return fulfilled
def wait_until_fulfilled(request_ids, conn):
while True:
requests = conn.get_all_spot_instance_requests(request_ids)
if not all_fulfilled(requests):
time.sleep(1)
else:
return requests
def add_tag(host):
return '{}-{}'.format(get_ec2_conf()['Tag'], host)
def get_host(tag):
return tag.split('-')[-1]
# request_id -> tag
def request_id_to_tag(requests, masters):
ret = {}
for i, rid in enumerate([r.id for r in requests]):
# TODO(cc): This naming convention for host may need changes
if i == 0:
host = 'AlluxioMaster'
elif i < masters:
host = 'AlluxioMaster{}'.format(i + 1)
else:
host = 'AlluxioWorker{}'.format(i - masters + 1)
ret[rid] = add_tag(host)
return ret
def save_request_ids(request_ids):
out = open('.request_ids', 'w')
pickle.dump(request_ids, out)
out.close()
def load_request_ids():
return pickle.load(open('.request_ids'))
def submit_request(conn, ec2_conf, masters):
# enable ssh as root without tty
user_data = "#!/bin/bash\n \
echo 'Defaults:root !requiretty' > /etc/sudoers.d/998-vagrant-cloud-init-requiretty && \
echo 'Defaults:ec2-user !requiretty' > /etc/sudoers.d/999-vagrant-cloud-init-requiretty && \
chmod 440 /etc/sudoers.d/998-vagrant-cloud-init-requiretty && chmod 440 /etc/sudoers.d/999-vagrant-cloud-init-requiretty"
requests = conn.request_spot_instances(
price = ec2_conf['Spot_Price'],
image_id = ec2_conf['AMI'],
count = get_init_conf()['MachineNumber'],
availability_zone_group = ec2_conf['Availability_Zone'],
placement = ec2_conf['Availability_Zone'], # where to put instance
key_name = ec2_conf['Keypair'],
security_groups = [ec2_conf['Security_Group']],
user_data = user_data,
instance_type = ec2_conf['Instance_Type'],
block_device_map = get_bdm(ec2_conf))
request_ids = [r.id for r in requests]
save_request_ids(request_ids)
# sleep before waiting for spot instances to be fulfilled.
time.sleep(5)
# block, waiting for all requests to be fulfilled
requests = wait_until_fulfilled(request_ids, conn)
# tag the requests and instances
rid_tag = request_id_to_tag(requests, masters)
for r in requests:
tag = rid_tag[r.id]
r.add_tag('Name', tag)
conn.create_tags([r.instance_id], {'Name': tag})
return rid_tag, requests
def cancel_request(conn):
warn('canceling spot instance requests and terminating instances...')
requests = conn.get_all_spot_instance_requests(load_request_ids())
for r in requests:
r.cancel()
instance_ids = [r.instance_id for r in requests if r.instance_id is not None]
if len(instance_ids) > 0:
conn.terminate_instances(instance_ids)
# mock the inventory file and machine id files that should have
# been generated by vagrant, so that we can keep the vagrant work flow.
def mock_vagrant_info(instance_id_to_tag_ip):
inventory_dir = '.vagrant/provisioners/ansible/inventory'
mkdir_p(inventory_dir)
inventory = open(os.path.join(inventory_dir, 'vagrant_ansible_inventory'), 'w')
for instance_id, tag_ip in instance_id_to_tag_ip.iteritems():
tag, ip = tag_ip
host = get_host(tag)
inventory.write("{} ansible_ssh_host={} ansible_ssh_port=22\n".format(host, ip))
id_dir = os.path.join('.vagrant', 'machines', host, 'aws')
mkdir_p(id_dir)
with open(os.path.join(id_dir, 'id'), 'w') as f:
f.write(instance_id)
inventory.close()
def is_ssh_ready(host):
s = subprocess.Popen(['ssh',
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'ConnectTimeout=30',
'-i', os.path.expanduser(get_ec2_conf()['Key_Path']),
'%s@%s' % ('ec2-user', host),
'true'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
s.communicate()
return s.returncode == 0
def wait_for_ssh(hosts):
while len(hosts):
hosts = [h for h in hosts if not is_ssh_ready(h)]
def parse():
parser = argparse.ArgumentParser()
grp = parser.add_mutually_exclusive_group(required=True)
grp.add_argument('-s', '--submit', action='store_true')
grp.add_argument('-c', '--cancel', action='store_true')
parser.add_argument('--masters', type=int, default=1, help='number of Alluxio masters')
return parser.parse_args()
def main(args):
ec2_conf = get_ec2_conf()
conn = get_conn()
if args.submit:
info('waiting for spot instance requests to be fulfilled, you can cancel by ctrl+c ...')
try:
rid_tag, requests = submit_request(conn, ec2_conf, args.masters)
except (KeyboardInterrupt, RequestFailedError) as e:
error(e)
exit(1)
info('spot instance requests fulfilled')
instance_id_to_tag_ip = {}
info('getting instance IPs...')
for r in requests:
instance_id = r.instance_id
info('waiting for ip to be allocated to the machine')
ip = conn.get_only_instances([instance_id])[0].ip_address
while ip is None:
time.sleep(1)
ip = conn.get_only_instances([instance_id])[0].ip_address
instance_id_to_tag_ip[instance_id] = (rid_tag[r.id], ip)
info('mocking vagrant info under .vagrant...')
mock_vagrant_info(instance_id_to_tag_ip)
info('creation of spot instances done')
info('waiting for ssh to be available...')
wait_for_ssh([ip for tag, ip in instance_id_to_tag_ip.values()])
info('ssh for all instances are ready')
elif args.cancel:
cancel_request(conn)
if __name__ == '__main__':
main(parse())
| apache-2.0 |
brandon-rhodes/numpy | numpy/polynomial/tests/test_printing.py | 208 | 2004 | from __future__ import division, absolute_import, print_function
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0, 1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0, 1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0, 1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0, 1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0, 1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0, 1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
CauldronDevelopmentLLC/buildbot | buildbot/test/test_svnpoller.py | 2 | 16276 | # -*- test-case-name: buildbot.test.test_svnpoller -*-
import time
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.changes.svnpoller import SVNPoller
# this is the output of "svn info --xml
# svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
prefix_output = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="trunk"
revision="18354">
<url>svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk</url>
<repository>
<root>svn+ssh://svn.twistedmatrix.com/svn/Twisted</root>
<uuid>bbbe8e31-12d6-0310-92fd-ac37d47ddeeb</uuid>
</repository>
<commit
revision="18352">
<author>jml</author>
<date>2006-10-01T02:37:34.063255Z</date>
</commit>
</entry>
</info>
"""
# and this is "svn info --xml svn://svn.twistedmatrix.com/svn/Twisted". I
# think this is kind of a degenerate case.. it might even be a form of error.
prefix_output_2 = """\
<?xml version="1.0"?>
<info>
</info>
"""
# this is the svn info output for a local repository, svn info --xml
# file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository
prefix_output_3 = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="SVN-Repository"
revision="3">
<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</url>
<repository>
<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
</repository>
<commit
revision="3">
<author>warner</author>
<date>2006-10-01T07:37:04.182499Z</date>
</commit>
</entry>
</info>
"""
# % svn info --xml file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk
prefix_output_4 = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="trunk"
revision="3">
<url>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk</url>
<repository>
<root>file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>c0f47ff4-ba1e-0410-96b5-d44cc5c79e7f</uuid>
</repository>
<commit
revision="1">
<author>warner</author>
<date>2006-10-01T07:37:02.286440Z</date>
</commit>
</entry>
</info>
"""
class ComputePrefix(unittest.TestCase):
def test1(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
s = SVNPoller(base + "/")
self.failUnlessEqual(s.svnurl, base) # certify slash-stripping
prefix = s.determine_prefix(prefix_output)
self.failUnlessEqual(prefix, "trunk")
self.failUnlessEqual(s._prefix, prefix)
def test2(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_2)
self.failUnlessEqual(prefix, "")
def test3(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_3)
self.failUnlessEqual(prefix, "")
def test4(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample/trunk"
s = SVNPoller(base)
self.failUnlessEqual(s.svnurl, base)
prefix = s.determine_prefix(prefix_output_4)
self.failUnlessEqual(prefix, "sample/trunk")
# output from svn log on .../SVN-Repository/sample
# (so it includes trunk and branches)
sample_base = "file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample"
sample_logentries = [None] * 6
sample_logentries[5] = """\
<logentry
revision="6">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="D">/sample/branch/version.c</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[4] = """\
<logentry
revision="5">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="D">/sample/branch</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[3] = """\
<logentry
revision="4">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
<paths>
<path
action="M">/sample/trunk/version.c</path>
</paths>
<msg>revised_to_2</msg>
</logentry>
"""
sample_logentries[2] = """\
<logentry
revision="3">
<author>warner</author>
<date>2006-10-01T19:35:10.215692Z</date>
<paths>
<path
action="M">/sample/branch/main.c</path>
</paths>
<msg>commit_on_branch</msg>
</logentry>
"""
sample_logentries[1] = """\
<logentry
revision="2">
<author>warner</author>
<date>2006-10-01T19:35:09.154973Z</date>
<paths>
<path
copyfrom-path="/sample/trunk"
copyfrom-rev="1"
action="A">/sample/branch</path>
</paths>
<msg>make_branch</msg>
</logentry>
"""
sample_logentries[0] = """\
<logentry
revision="1">
<author>warner</author>
<date>2006-10-01T19:35:08.642045Z</date>
<paths>
<path
action="A">/sample</path>
<path
action="A">/sample/trunk</path>
<path
action="A">/sample/trunk/subdir/subdir.c</path>
<path
action="A">/sample/trunk/main.c</path>
<path
action="A">/sample/trunk/version.c</path>
<path
action="A">/sample/trunk/subdir</path>
</paths>
<msg>sample_project_files</msg>
</logentry>
"""
sample_info_output = """\
<?xml version="1.0"?>
<info>
<entry
kind="dir"
path="sample"
revision="4">
<url>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository/sample</url>
<repository>
<root>file:///usr/home/warner/stuff/Projects/BuildBot/trees/misc/_trial_temp/test_vc/repositories/SVN-Repository</root>
<uuid>4f94adfc-c41e-0410-92d5-fbf86b7c7689</uuid>
</repository>
<commit
revision="4">
<author>warner</author>
<date>2006-10-01T19:35:16.165664Z</date>
</commit>
</entry>
</info>
"""
changes_output_template = """\
<?xml version="1.0"?>
<log>
%s</log>
"""
def make_changes_output(maxrevision):
# return what 'svn log' would have just after the given revision was
# committed
logs = sample_logentries[0:maxrevision]
assert len(logs) == maxrevision
logs.reverse()
output = changes_output_template % ("".join(logs))
return output
def split_file(path):
pieces = path.split("/")
if pieces[0] == "branch":
return "branch", "/".join(pieces[1:])
if pieces[0] == "trunk":
return None, "/".join(pieces[1:])
raise RuntimeError("there shouldn't be any files like %s" % path)
class MySVNPoller(SVNPoller):
def __init__(self, *args, **kwargs):
SVNPoller.__init__(self, *args, **kwargs)
self.pending_commands = []
self.finished_changes = []
def getProcessOutput(self, args):
d = defer.Deferred()
self.pending_commands.append((args, d))
return d
def submit_changes(self, changes):
self.finished_changes.extend(changes)
class ComputeChanges(unittest.TestCase):
def test1(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base)
s._prefix = "sample"
output = make_changes_output(4)
doc = s.parse_logs(output)
newlast, logentries = s._filter_new_logentries(doc, 4)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 0)
newlast, logentries = s._filter_new_logentries(doc, 3)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 1)
newlast, logentries = s._filter_new_logentries(doc, 1)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 3)
newlast, logentries = s._filter_new_logentries(doc, None)
self.failUnlessEqual(newlast, 4)
self.failUnlessEqual(len(logentries), 0)
def testChanges(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base, split_file=split_file)
s._prefix = "sample"
doc = s.parse_logs(make_changes_output(3))
newlast, logentries = s._filter_new_logentries(doc, 1)
# so we see revisions 2 and 3 as being new
self.failUnlessEqual(newlast, 3)
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 2)
self.failUnlessEqual(changes[0].branch, "branch")
self.failUnlessEqual(changes[0].revision, '2')
self.failUnlessEqual(changes[1].branch, "branch")
self.failUnlessEqual(changes[1].files, ["main.c"])
self.failUnlessEqual(changes[1].revision, '3')
# and now pull in r4
doc = s.parse_logs(make_changes_output(4))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 4)
# so we see revision 4 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 1)
self.failUnlessEqual(changes[0].branch, None)
self.failUnlessEqual(changes[0].revision, '4')
self.failUnlessEqual(changes[0].files, ["version.c"])
# and now pull in r5 (should *not* create a change as it's a
        # branch deletion)
doc = s.parse_logs(make_changes_output(5))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 5)
# so we see revision 5 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 0)
# and now pull in r6 (should create a change as it's not
        # deleting an entire branch)
doc = s.parse_logs(make_changes_output(6))
newlast, logentries = s._filter_new_logentries(doc, newlast)
self.failUnlessEqual(newlast, 6)
# so we see revision 6 as being new
changes = s.create_changes(logentries)
self.failUnlessEqual(len(changes), 1)
self.failUnlessEqual(changes[0].branch, 'branch')
self.failUnlessEqual(changes[0].revision, '6')
self.failUnlessEqual(changes[0].files, ["version.c"])
def testFirstTime(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = SVNPoller(base, split_file=split_file)
s._prefix = "sample"
doc = s.parse_logs(make_changes_output(4))
logentries = s.get_new_logentries(doc)
# SVNPoller ignores all changes that happened before it was started
self.failUnlessEqual(len(logentries), 0)
self.failUnlessEqual(s.last_change, 4)
class Misc(unittest.TestCase):
def testAlreadyWorking(self):
base = "file:///home/warner/stuff/Projects/BuildBot/trees/svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample"
s = MySVNPoller(base)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(s.overrun_counter, 0)
d2 = s.checksvn()
self.failUnlessEqual(s.overrun_counter, 1)
self.failUnlessEqual(len(s.pending_commands), 1)
def testGetRoot(self):
base = "svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk"
s = MySVNPoller(base)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["info", "--xml", "--non-interactive", base])
def makeTime(timestring):
datefmt = '%Y/%m/%d %H:%M:%S'
when = time.mktime(time.strptime(timestring, datefmt))
return when
class Everything(unittest.TestCase):
def test1(self):
s = MySVNPoller(sample_base, split_file=split_file)
d = s.checksvn()
# the SVNPoller is now waiting for its getProcessOutput to finish
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["info", "--xml", "--non-interactive",
sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(sample_info_output)
# now it should be waiting for the 'svn log' command
self.failUnlessEqual(len(s.pending_commands), 1)
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(1))
# the command ignores the first batch of changes
self.failUnlessEqual(len(s.finished_changes), 0)
self.failUnlessEqual(s.last_change, 1)
# now fire it again, nothing changing
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(1))
# nothing has changed
self.failUnlessEqual(len(s.finished_changes), 0)
self.failUnlessEqual(s.last_change, 1)
# and again, with r2 this time
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(2))
# r2 should appear
self.failUnlessEqual(len(s.finished_changes), 1)
self.failUnlessEqual(s.last_change, 2)
c = s.finished_changes[0]
self.failUnlessEqual(c.branch, "branch")
self.failUnlessEqual(c.revision, '2')
self.failUnlessEqual(c.files, [''])
# TODO: this is what creating the branch looks like: a Change with a
# zero-length file. We should decide if we want filenames like this
# in the Change (and make sure nobody else gets confused by it) or if
# we want to strip them out.
self.failUnlessEqual(c.comments, "make_branch")
# and again at r2, so nothing should change
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(2))
# nothing has changed
self.failUnlessEqual(len(s.finished_changes), 1)
self.failUnlessEqual(s.last_change, 2)
# and again with both r3 and r4 appearing together
d = s.checksvn()
self.failUnlessEqual(s.pending_commands[0][0],
["log", "--xml", "--verbose", "--non-interactive",
"--limit=100", sample_base])
d = s.pending_commands[0][1]
s.pending_commands.pop(0)
d.callback(make_changes_output(4))
self.failUnlessEqual(len(s.finished_changes), 3)
self.failUnlessEqual(s.last_change, 4)
c3 = s.finished_changes[1]
self.failUnlessEqual(c3.branch, "branch")
self.failUnlessEqual(c3.revision, '3')
self.failUnlessEqual(c3.files, ["main.c"])
self.failUnlessEqual(c3.comments, "commit_on_branch")
c4 = s.finished_changes[2]
self.failUnlessEqual(c4.branch, None)
self.failUnlessEqual(c4.revision, '4')
self.failUnlessEqual(c4.files, ["version.c"])
self.failUnlessEqual(c4.comments, "revised_to_2")
self.failUnless(abs(c4.when - time.time()) < 60)
# TODO:
# get coverage of split_file returning None
# point at a live SVN server for a little while
| gpl-2.0 |
greg-hellings/ansible-modules-extras | monitoring/sensu_check.py | 42 | 11565 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
- All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
- they are simply specified for your convenience.
options:
name:
description:
- The name of the check
- This is the key that is used to determine whether a check exists
required: true
state:
description: Whether the check should be present or not
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the json file of the check to be added/removed.
- Will be created if it does not exist (unless I(state=absent)).
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
required: false
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so
- you can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
command:
description:
- Path to the sensu check to run (not required when I(state=absent))
required: true
handlers:
description:
- List of handlers to notify when the check fails
required: false
default: []
subscribers:
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
required: false
default: []
interval:
description:
- Check interval in seconds
required: false
default: null
timeout:
description:
- Timeout for the check
required: false
default: 10
handle:
description:
- Whether the check should be handled or not
choices: [ 'yes', 'no' ]
required: false
default: yes
subdue_begin:
description:
- When to disable handling of check failures
required: false
default: null
subdue_end:
description:
- When to enable handling of check failures
required: false
default: null
dependencies:
description:
- Other checks this check depends on, if dependencies fail,
- handling of this check will be disabled
required: false
default: []
metric:
description: Whether the check is a metric
choices: [ 'yes', 'no' ]
required: false
default: no
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- This option obviates the need for specifying the I(subscribers) option
choices: [ 'yes', 'no' ]
required: false
default: no
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
choices: [ 'yes', 'no' ]
required: false
default: yes
occurrences:
description:
- Number of event occurrences before the handler should take action
required: false
default: 1
refresh:
description:
- Number of seconds handlers should wait before taking second action
required: false
default: null
aggregate:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
choices: [ 'yes', 'no' ]
required: false
default: no
low_flap_threshold:
description:
- The low threshold for flap detection
required: false
default: null
high_flap_threshold:
description:
- The high threshold for flap detection
required: false
default: null
requirements: [ ]
author: Anders Ingemann
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
sensu_check: name=cpu_load
command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric=yes handlers=relay subscribers=common interval=60
# Check whether nginx is running
- name: check nginx process
sensu_check: name=nginx_running
command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
handlers=default subscribers=nginx interval=60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
sensu_check: name=check_disk_capacity
'''
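# --- Editor's illustrative sketch (not part of the original module) ---
# Assuming the first task in EXAMPLES above runs against an empty
# /etc/sensu/conf.d/checks.json, the file written by sensu_check() below
# should end up looking roughly like this (key order may differ):
#
# {
#   "checks": {
#     "cpu_load": {
#       "command": "/etc/sensu/plugins/system/cpu-mpstat-metrics.rb",
#       "handlers": ["relay"],
#       "subscribers": ["common"],
#       "interval": 60,
#       "type": "metric"
#     }
#   }
# }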
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
try:
import json
except ImportError:
import simplejson as json
stream = None
try:
try:
stream = open(path, 'r')
config = json.loads(stream.read())
except IOError, e:
if e.errno == 2: # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
finally:
if stream:
stream.close()
if 'checks' not in config:
if state == 'absent':
reasons.append('`checks\' section did not exist and state is `absent\'')
return changed, reasons
config['checks'] = {}
changed = True
reasons.append('`checks\' section did not exist')
if state == 'absent':
if name in config['checks']:
del config['checks'][name]
changed = True
reasons.append('check was present and state is `absent\'')
if state == 'present':
if name not in config['checks']:
check = {}
config['checks'][name] = check
changed = True
reasons.append('check was absent and state is `present\'')
else:
check = config['checks'][name]
simple_opts = ['command',
'handlers',
'subscribers',
'interval',
'timeout',
'handle',
'dependencies',
'standalone',
'publish',
'occurrences',
'refresh',
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
]
for opt in simple_opts:
if module.params[opt] is not None:
if opt not in check or check[opt] != module.params[opt]:
check[opt] = module.params[opt]
changed = True
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
else:
if opt in check:
del check[opt]
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
changed = True
reasons.append('`type\' was not defined or not `metric\'')
if not module.params['metric'] and 'type' in check:
del check['type']
changed = True
reasons.append('`type\' was defined')
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
subdue = {'begin': module.params['subdue_begin'],
'end': module.params['subdue_end'],
}
if 'subdue' not in check or check['subdue'] != subdue:
check['subdue'] = subdue
changed = True
reasons.append('`subdue\' did not exist or was different')
else:
if 'subdue' in check:
del check['subdue']
changed = True
reasons.append('`subdue\' was removed')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
stream = None
try:
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
except IOError, e:
module.fail_json(msg=str(e))
finally:
if stream:
stream.close()
return changed, reasons
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
'command': {'type': 'str'},
'handlers': {'type': 'list'},
'subscribers': {'type': 'list'},
'interval': {'type': 'int'},
'timeout': {'type': 'int'},
'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
'metric': {'type': 'bool', 'default': 'no'},
'standalone': {'type': 'bool'},
'publish': {'type': 'bool'},
'occurrences': {'type': 'int'},
'refresh': {'type': 'int'},
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
}
required_together = [['subdue_begin', 'subdue_end']]
module = AnsibleModule(argument_spec=arg_spec,
required_together=required_together,
supports_check_mode=True)
if module.params['state'] != 'absent' and module.params['command'] is None:
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_check(module, path, name, state, backup)
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
kushal124/gensim | gensim/test/test_utils.py | 53 | 2863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions.
"""
import logging
import unittest
from gensim import utils
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
result = utils.is_corpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are not corpora, because they do not consist of 2-tuples of
# the form (int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.is_corpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
class TestUtils(unittest.TestCase):
def test_decode_entities(self):
# create a string that fails to decode with unichr on narrow python builds
body = u'It’s the Year of the Horse. YES VIN DIESEL 🙌 💯'
expected = u'It\x92s the Year of the Horse. YES VIN DIESEL \U0001f64c \U0001f4af'
self.assertEquals(utils.decode_htmlentities(body), expected)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
| gpl-3.0 |
randynobx/ansible | test/runner/lib/changes.py | 57 | 5755 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason):
"""
:type branch: str
:type reason: str
"""
message = 'Invalid branch: %s\n%s' % (branch, reason)
super(InvalidBranch, self).__init__(message)
self.branch = branch
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
pass
class ShippableChanges(object):
"""Change information for Shippable build."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
try:
self.branch = os.environ['BRANCH']
self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
self.commit = os.environ['COMMIT']
self.project_id = os.environ['PROJECT_ID']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.is_tag:
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
if self.is_pr:
self.paths = sorted(git.get_diff_names(['origin/%s' % self.branch, '--']))
self.diff = git.get_diff(['origin/%s' % self.branch, '--'])
else:
merge_runs = self.get_merge_runs(self.project_id, self.branch)
last_successful_commit = self.get_last_successful_commit(git, merge_runs)
if last_successful_commit:
self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
self.diff = git.get_diff([last_successful_commit, self.commit])
else:
# tracked files (including unchanged)
self.paths = sorted(git.get_file_names(['--cached']))
self.diff = []
def get_merge_runs(self, project_id, branch):
"""
:type project_id: str
:type branch: str
:rtype: list[dict]
"""
params = dict(
isPullRequest='false',
projectIds=project_id,
branch=branch,
)
client = HttpClient(self.args, always=True)
response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
return response.json()
@staticmethod
def get_last_successful_commit(git, merge_runs):
"""
:type git: Git
:type merge_runs: dict | list[dict]
:rtype: str
"""
if 'id' in merge_runs and merge_runs['id'] == 4004:
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return None
merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
known_commits = set()
last_successful_commit = None
for merge_run in merge_runs:
commit_sha = merge_run['commitSha']
if commit_sha not in known_commits:
known_commits.add(commit_sha)
if merge_run['statusCode'] == 30:
if git.is_valid_ref(commit_sha):
last_successful_commit = commit_sha
return last_successful_commit
class LocalChanges(object):
"""Change information for local work."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
self.current_branch = git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = git.get_diff([self.fork_point])
@staticmethod
def is_official_branch(name):
"""
:type name: str
:rtype: bool
"""
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
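# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal sketch of how LocalChanges might be used, assuming `args` is a
# populated lib.util.CommonConfig, the working tree is a git checkout on a
# feature branch, and Git() is constructed from the same args object (all of
# these are assumptions; nothing below is executed by the test runner):
#
#     from lib.git import Git
#     git = Git(args)
#     changes = LocalChanges(args, git)
#     interesting = set(changes.committed) | set(changes.staged) | set(changes.unstaged)
#     print('%d changed paths since fork point %s' % (len(interesting), changes.fork_point))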
| gpl-3.0 |
suninsky/ReceiptOCR | Python/server/lib/python2.7/site-packages/werkzeug/http.py | 85 | 36658 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib.request import parse_http_list as _parse_list_header
from urllib.parse import unquote_to_bytes as _unquote
except ImportError: # pragma: no cover
from urllib2 import parse_http_list as _parse_list_header, \
unquote as _unquote
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
_cookie_charset = 'latin1'
# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
r'''( # media-range capturing-parenthesis
[^\s;,]+ # type/subtype
(?:[ \t]*;[ \t]* # ";"
(?: # parameter non-capturing-parenthesis
[^\s;,q][^\s;,]* # token that doesn't start with "q"
| # or
q[^\s;,=][^\s;,]* # token that is more than just "q"
)
)* # zero or more parameters
) # end of media-range
(?:[ \t]*;[ \t]*q= # weight is a "q" parameter
(\d*(?:\.\d+)?) # qvalue capturing-parentheses
[^,]* # "extension" accept params: who cares?
)? # accept params are optional
''', re.VERBOSE)
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(
r';\s*(%s|[^\s;,=\*]+)\s*'
r'(?:\*?=\s*(?:([^\s]+?)\'([^\s]*?)\')?(%s|[^;,]+)?)?\s*' %
(_quoted_string_re, _quoted_string_re)
)
_option_header_start_mime_type = re.compile(r',\s*([^;,\s]+)([;,]\s*.+)?')
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
451: 'Unavailable For Legal Reasons',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones
"""
if isinstance(data, bytes):
return data
return data.encode('latin1') # XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict-like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
# XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value, multiple=False):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether to try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True
"""
if not value:
return '', {}
result = []
value = "," + value.replace("\n", ",")
while value:
match = _option_header_start_mime_type.match(value)
if not match:
break
result.append(match.group(1)) # mimetype
options = {}
# Parse options
rest = match.group(2)
while rest:
optmatch = _option_header_piece_re.match(rest)
if not optmatch:
break
option, encoding, _, option_value = optmatch.groups()
option = unquote_header_value(option)
if option_value is not None:
option_value = unquote_header_value(
option_value,
option == 'filename')
if encoding is not None:
option_value = _unquote(option_value).decode(encoding)
options[option] = option_value
rest = rest[optmatch.end():]
result.append(options)
if multiple is False:
return tuple(result)
value = rest
return tuple(result) if result else ('', {})
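# --- Editor's illustrative note (not part of the original module) ---
# With multiple=True the return value interleaves mimetypes and their option
# dicts; e.g. the call below should yield the tuple shown (a sketch based on
# the parsing loop above, not an authoritative doctest):
#
#     parse_options_header('text/html; charset=utf8, text/plain', multiple=True)
#     # -> ('text/html', {'charset': 'utf8'}, 'text/plain', {})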
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b'basic':
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception:
return
return Authorization('basic', {'username': bytes_to_wsgi(username),
'password': bytes_to_wsgi(password)})
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'response':
if key not in auth_map:
return
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return
return Authorization('digest', auth_map)
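# --- Editor's illustrative note (not part of the original module) ---
# A hedged example of the basic-auth branch above; 'dXNlcjpwYXNz' is
# base64('user:pass'), so the call should produce an Authorization object
# whose username/password are 'user' and 'pass':
#
#     auth = parse_authorization_header('Basic dXNlcjpwYXNz')
#     # auth.type == 'basic'; auth.username == 'user'; auth.password == 'pass'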
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
try:
begin = int(item)
except ValueError:
return None
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = begin.strip()
end = end.strip()
if not begin.isdigit():
return None
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
if not end.isdigit():
return None
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
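# --- Editor's illustrative note (not part of the original module) ---
# Ranges are returned as non-inclusive (start, stop) tuples, so a sketch of
# the expected results (based on the loop above) would be:
#
#     parse_range_header('bytes=0-499')   # -> Range('bytes', [(0, 500)])
#     parse_range_header('bytes=500-')    # -> Range('bytes', [(500, None)])
#     parse_range_header('bytes=abc')     # -> None (malformed)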
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'W/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('W/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag.startswith(('W/', 'w/')):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None,
ignore_if_range=True):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:param ignore_if_range: If `False`, `If-Range` header will be taken into
account.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, string_types):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
if_range = None
if not ignore_if_range and 'HTTP_RANGE' in environ:
# http://tools.ietf.org/html/rfc7233#section-3.2
# A server MUST ignore an If-Range header field received in a request
# that does not contain a Range header field.
if_range = parse_if_range_header(environ.get('HTTP_IF_RANGE'))
if if_range is not None and if_range.date is not None:
modified_since = if_range.date
else:
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
etag, _ = unquote_etag(etag)
if if_range is not None and if_range.etag is not None:
unmodified = parse_etags(if_range.etag).contains(etag)
else:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
# http://tools.ietf.org/html/rfc7232#section-3.2
# "A recipient MUST use the weak comparison function when comparing
# entity-tags for If-None-Match"
unmodified = if_none_match.contains_weak(etag)
return not unmodified
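# --- Editor's illustrative note (not part of the original module) ---
# A minimal conditional-GET sketch using standard WSGI environ keys
# (hedged; the exact ETag comparison semantics are those of the code above):
#
#     env = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
#     is_resource_modified(env, etag='abc')                        # -> False (304 is appropriate)
#     is_resource_modified({'REQUEST_METHOD': 'GET'}, etag='abc')  # -> True (send full response)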
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
"""
return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
elif header is None:
header = ''
# If the value is a unicode string it's mangled through latin1. This
# is done because under PEP 3333 on Python 3 all headers are assumed to be
# latin1, which however is incorrect for cookies, which are sent in page
# encoding. As a result we re-encode the header through latin1 before parsing.
if isinstance(header, text_type):
header = header.encode('latin1', 'replace')
if cls is None:
cls = TypeConversionDict
def _parse_pairs():
for key, val in _cookie_parse_impl(header):
key = to_unicode(key, charset, errors, allow_none_charset=True)
val = to_unicode(val, charset, errors, allow_none_charset=True)
yield try_coerce_native(key), val
return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False,
charset='utf-8', sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in ((b'Domain', domain, True),
(b'Expires', expires, False,),
(b'Max-Age', max_age, False),
(b'Secure', secure, None),
(b'HttpOnly', httponly, None),
(b'Path', path, False)):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b'=' + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1')
return rv
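# --- Editor's illustrative note (not part of the original module) ---
# Without max_age/expires the output is deterministic, so a hedged example of
# the header value produced by the code above would be:
#
#     dump_cookie('session', 'abc123', httponly=True)
#     # -> 'session=abc123; HttpOnly; Path=/'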
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import ( # noqa
MIMEAccept, CharsetAccept, LanguageAccept, Headers
)
from werkzeug.urls import iri_to_uri
| mit |
UManPychron/pychron | pychron/file_defaults.py | 2 | 11951 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
"""
This file defines the text for various default files.
Values are used in pychron.paths when building directory structure
"""
from __future__ import absolute_import
import yaml
from pychron.core.helpers.strtools import to_bool
# PIPELINE_TEMPLATES = '''- Isotope Evolutions
# - Blanks
# - IC Factor
# - Flux
# - Ideogram
# - Spectrum
# - Inverse Isochron
# - Series
# - Regression Series
# - Radial
# - Analysis Table
# - Interpreted Age Table
# - Interpreted Age Ideogram
# - Auto Ideogram
# - Auto Series
# - Auto Report
# - Report
# - Diff
# - Vertical Flux
# - Xy Scatter
# - Geochron
# - Yield
# - CSV Analyses Export
# - CSV Ideogram
# - Correction Factors
# - Monitor Chain
# - Analysis Metadata
# '''
IDENTIFIERS_DEFAULT = """
- name: Blank
shortname: b
extractable: False
special: True
- name: Blank Air
shortname: ba
extractable: False
special: True
- name: Blank Cocktail
shortname: bc
extractable: False
special: True
- name: Blank Unknown
shortname: bu
extractable: False
special: True
- name: Blank ExtractionLine
shortname: be
extractable: False
special: True
- name: Background
shortname: bg
extractable: False
special: True
- name: Unknown
shortname: u
extractable: True
special: False
- name: Cocktail
shortname: c
extractable: False
special: True
- name: Air
shortname: a
extractable: False
special: True
- name: Pause
shortname: pa
extractable: False
special: True
- name: Degas
shortname: dg
extractable: True
special: True
- name: Detector IC
shortname: ic
extractable: False
special: True
"""
EDIT_UI_DEFAULT = """
predefined: Simple
"""
TASK_EXTENSION_DEFAULT = """
-
plugin_id: pychron.update.plugin
actions:
- pychron.update.check_for_updates, True
- pychron.update.manage_version, False
- pychron.update.manage_branch, False
- pychron.update.build_app, False
-
plugin_id: pychron.processing.plugin.recall
actions:
- pychron.recall.recall, True
- pychron.recall.configure, True
- pychron.recall.time_view, True
-
plugin_id: pychron.processing.plugin.figures
actions:
- pychron.figure.spectrum, True
- pychron.figure.ideogram, True
- pychron.figure.inv_isochron, True
- pychron.figure.series, True
- pychron.figure.composite, True
- pychron.figure.xyscatter, True
- pychron.figure.file_ideogram, True
- pychron.figure.file_spectrum, True
- pychron.figure.ideogram_file_template, True
- pychron.figure.spectrum_file_template, True
- pychron.figure.refresh, True
-
plugin_id: pychron.processing.plugin.reduction
actions:
- pychron.reduction.iso_evo, True
- pychron.reduction.blanks, True
- pychron.reduction.ic_factor, True
- pychron.reduction.discrimination, False
- pychron.reduction.flux, True
-
plugin_id: pychron.processing.plugin.dataset
actions:
- pychron.reduction.sqlite_dataset, True
- pychron.reduction.xml_dataset, True
-
plugin_id: pychron.processing.plugin.grouping
actions:
- pychron.grouping.selected, True
- pychron.grouping.aliquot, True
- pychron.grouping.lnumber, True
- pychron.grouping.sample, True
- pychron.grouping.clear, True
- pychron.grouping.gselected, True
- pychron.grouping.gsample, True
-
plugin_id: pychron.processing.plugin.misc
actions:
- pychron.misc.tag, True
- pychron.misc.drtag, False
- pychron.misc.select_drtag, False
- pychron.misc.db_save, True
- pychron.misc.clear_cache, True
- pychron.misc.modify_k, False
- pychron.misc.modify_identifier, False
-
plugin_id: pychron.processing.plugin.agroup
actions:
- pychron.agroup.make, False
- pychron.agroup.delete, False
-
plugin_id: pychron.experiment.plugin.edit
task_id: pychron.experiment.task
actions:
- pychron.experiment.edit.deselect, False
- pychron.experiment.edit.reset, True
- pychron.experiment.edit.sync, True
- pychron.experiment.edit.undo, False
- pychron.experiment.edit.configure, False
-
plugin_id: pychron.experiment.plugin
actions:
- pychron.experiment.open_system_conditionals, True
- pychron.experiment.open_queue_conditionals, True
- pychron.experiment.open_experiment, True
- pychron.experiment.open_last_experiment, True
- pychron.experiment.launch_history, True
- pychron.experiment.run_history_view, True
- pychron.experiment.test_notify, False
- pychron.experiment.new_experiment, True
- pychron.experiment.signal_calculator, False
- pychron.experiment.new_pattern, False
- pychron.experiment.open_pattern, False
-
plugin_id: pychron.entry.plugin
task_id: pychron.entry.irradiation.task
actions:
- pychron.entry2.transfer_j, True
- pychron.entry2.import_irradiation, True
- pychron.entry2.export_irradiation, False
- pychron.entry2.import_samples_from_file, False
- pychron.entry2.generate_tray, False
- pychron.entry2.save_labbook, False
- pychron.entry2.make_template, False
-
plugin_id: pychron.entry.plugin
actions:
- pychron.entry1.labnumber_entry, True
- pychron.entry1.sample_entry, True
- pychron.entry1.sample_prep, True
- pychron.entry1.generate_irradiation_table, False
- pychron.entry1.import_irradiation_holder, False
- pychron.entry1.sensitivity_entry, True
- pychron.entry1.flux_monitor, False
"""
actions = []
for line in TASK_EXTENSION_DEFAULT.split('\n'):
line = line.strip()
if line.startswith('- pychron.'):
a, b = line.split(',')
if to_bool(b):
actions.append(a)
SIMPLE_UI_DEFAULT = '\n'.join(actions)
DEFAULT_INITIALIZATION = '''<root>
<globals>
</globals>
<plugins>
<general>
<plugin enabled="false">Processing</plugin>
<plugin enabled="false">MediaStorage</plugin>
<plugin enabled="false">PyScript</plugin>
<plugin enabled="false">Video</plugin>
<plugin enabled="false">Database</plugin>
<plugin enabled="false">Entry</plugin>
<plugin enabled="false">ArArConstants</plugin>
<plugin enabled="false">Loading</plugin>
<plugin enabled="false">LabBook</plugin>
<plugin enabled="false">DashboardServer</plugin>
<plugin enabled="false">DashboardClient</plugin>
</general>
<hardware>
</hardware>
<social>
</social>
</plugins>
</root>
'''
DEFAULT_STARTUP_TESTS = '''
- plugin: Database
tests:
- test_pychron
- test_pychron_version
- plugin: MassSpec
tests:
- test_database
- plugin: LabBook
tests:
- plugin: ArArConstants
tests:
- plugin: ArgusSpectrometer
tests:
- test_communication
- test_intensity
- plugin: ExtractionLine
tests:
- test_valve_communication
- test_gauge_communication
'''
EXPERIMENT_DEFAULTS = '''
columns:
- Labnumber
- Aliquot
- Sample
- Position
- Extract
- Units
- Duration (s)
- Cleanup (s)
- Beam (mm)
- Pattern
- Extraction
- Measurement
- Conditionals
- Comment
'''
RATIO_CHANGE_DETECTION = '''
# - ratio: Ar40/Ar36
# nanalyses: 5
# threshold: 1
## percent_threshold: 1
## nominal_ratio: 295
## nsigma: 3
# analysis_type: air
# failure_count: 2
# consecutive_failure: True
# - ratio: Ar40/Ar39
# nanalyses: 5
# threshold: 1
## percent_threshold: 1
## nominal_ratio: 10
## nsigma: 3
# analysis_type: cocktail
# failure_count: 2
# consecutive_failure: True
'''
def make_screen(**kw):
obj = {'padding_left': 100,
'padding_right': 100,
'padding_top': 100,
'padding_bottom': 100,
'bgcolor': 'white',
'plot_bgcolor': 'white',
'xtick_in': 1,
'xtick_out': 5,
'ytick_in': 1,
'ytick_out': 5,
'use_xgrid': True,
'use_ygrid': True,
}
obj.update(kw)
return yaml.dump(obj, default_flow_style=False)
def make_presentation(**kw):
obj = {'padding_left': 40,
'padding_right': 40,
'padding_top': 40,
'padding_bottom': 40,
'bgcolor': (239, 238, 185),
'plot_bgcolor': (208, 243, 241),
'xtick_in': 1,
'xtick_out': 5,
'ytick_in': 1,
'ytick_out': 5,
'use_xgrid': True,
'use_ygrid': True, }
obj.update(kw)
return yaml.dump(obj, default_flow_style=False)
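# Illustrative example (not part of the original source): any keyword passed to
# these helpers overrides the corresponding default before the YAML is dumped,
# e.g. make_screen(use_xgrid=False) yields the screen defaults above with
# ``use_xgrid: false``.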
DEFINE_EQUILIBRATION_SCREEN = make_screen()
ISO_EVO_SCREEN = make_screen()
SERIES_SCREEN = make_screen()
BLANKS_SCREEN = make_screen()
ICFACTOR_SCREEN = make_screen()
BLANKS_PRESENTATION = make_presentation()
iso_d = dict(use_xgrid=False, use_ygrid=False)
inv_iso_d = dict(use_xgrid=False, use_ygrid=False,
nominal_intercept_label='Atm',
nominal_intercept_value=295.5,
show_nominal_intercept=True,
invert_nominal_intercept=True,
inset_marker_size=2.5,
inset_marker_color='black')
ISOCHRON_SCREEN = make_screen(**iso_d)
ISOCHRON_PRESENTATION = make_presentation(**iso_d)
INVERSE_ISOCHRON_SCREEN = make_screen(**inv_iso_d)
INVERSE_ISOCHRON_PRESENTATION = make_presentation(**inv_iso_d)
ideo_d = dict(probability_curve_kind='cumulative',
mean_calculation_kind='weighted mean',
mean_sig_figs=2,
index_attr='uage')
IDEOGRAM_SCREEN = make_screen(mean_indicator_fontsize=12,
**ideo_d)
IDEOGRAM_PRESENTATION = make_presentation(mean_indicator_fontsize=24,
**ideo_d)
spec_d = dict(plateau_line_width=1,
plateau_line_color='black',
plateau_sig_figs=2,
# calculate_fixed_plateau= False,
# calculate_fixed_plateau_start= '',
# calculate_fixed_plateau_end= '',
pc_nsteps=3,
pc_gas_fraction=50,
integrated_sig_figs=2,
legend_location='Upper Right',
include_legend=False,
include_sample_in_legend=False,
display_step=True,
display_extract_value=False)
SPECTRUM_PRESENTATION = make_presentation(**spec_d)
SPECTRUM_SCREEN = make_screen(**spec_d)
radial_d = dict()
RADIAL_SCREEN = make_screen(**radial_d)
regression_series_d = dict()
REGRESSION_SERIES_SCREEN = make_screen(**regression_series_d)
FLUX_CONSTANTS_DEFAULT = """
# This is an example flux file. Add additional decay_constant and monitor_age pairs here
"FC MIN":
lambda_ec: [5.80e-11, 0]
lambda_b: [4.884e-10, 0]
monitor_age: 28.201
"FC SJ":
lambda_ec: [5.81e-11, 0]
lambda_b: [4.962e-10, 0]
monitor_age: 28.02
"""
REACTORS_DEFAULT = '''{
"Triga": {
"K4039": [0.007614,0.000105],
"K3839": [0.013,0.0],
"K3739": [0.0,0.0],
"Ca3937": [0.00066,1e-05],
"Ca3837": [4e-05,2e-06],
"Ca3637": [0.000264,1e-06],
"Cl3638": [250.0,0.0],
"Ca_K": [1.96,0.0],
"Cl_K": [0.227,0.0]
}
}
'''
# ============= EOF =============================================
| apache-2.0 |
nomadcube/scikit-learn | sklearn/decomposition/incremental_pca.py | 199 | 10508 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _batch_mean_variance_update
class IncrementalPCA(_BasePCA):
"""Incremental principal components analysis (IPCA).
Linear dimensionality reduction using Singular Value Decomposition of
centered data, keeping only the most significant singular vectors to
project the data to a lower dimensional space.
Depending on the size of the input data, this algorithm can be much more
memory efficient than a PCA.
This algorithm has constant memory complexity, on the order
of ``batch_size``, enabling use of np.memmap files without loading the
entire file into memory.
The computational overhead of each SVD is
``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
remain in memory at a time. There will be ``n_samples / batch_size`` SVD
computations to get the principal components, versus 1 large SVD of
complexity ``O(n_samples * n_features ** 2)`` for PCA.
Read more in the :ref:`User Guide <IncrementalPCA>`.
Parameters
----------
n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
batch_size : int or None, (default=None)
The number of samples to use for each batch. Only used when calling
``fit``. If ``batch_size`` is ``None``, then ``batch_size``
is inferred from the data and set to ``5 * n_features``, to provide a
balance between approximation accuracy and memory consumption.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
whiten : bool, optional
When True (False by default) the ``components_`` vectors are divided
by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making data respect some hard-wired assumptions.
Attributes
----------
components_ : array, shape (n_components, n_features)
Components with maximum variance.
explained_variance_ : array, shape (n_components,)
Variance explained by each of the selected components.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If all components are stored, the sum of explained variances is equal
to 1.0
mean_ : array, shape (n_features,)
Per-feature empirical mean, aggregate over calls to ``partial_fit``.
var_ : array, shape (n_features,)
Per-feature empirical variance, aggregate over calls to ``partial_fit``.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf.
n_components_ : int
The estimated number of components. Relevant when ``n_components=None``.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Notes
-----
Implements the incremental PCA model from:
`D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
pp. 125-141, May 2008.`
See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
This model is an extension of the Sequential Karhunen-Loeve Transform from:
`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
its Application to Images, IEEE Transactions on Image Processing, Volume 9,
Number 8, pp. 1371-1374, August 2000.`
See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
We have specifically abstained from an optimization used by authors of both
papers, a QR decomposition used in specific situations to reduce the
algorithmic complexity of the SVD. The source for this technique is
    `Matrix Computations, Third Edition, G. Golub and C. Van Loan, Chapter 5,
section 5.4.4, pp 252-253.`. This technique has been omitted because it is
advantageous only when decomposing a matrix with ``n_samples`` (rows)
>= 5/3 * ``n_features`` (columns), and hurts the readability of the
implemented algorithm. This would be a good opportunity for future
optimization, if it is deemed necessary.
References
----------
D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77,
Issue 1-3, pp. 125-141, May 2008.
G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
Section 5.4.4, pp. 252-253.
See also
--------
PCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, whiten=False, copy=True,
batch_size=None):
self.n_components = n_components
self.whiten = whiten
self.copy = copy
self.batch_size = batch_size
def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y: Passthrough for ``Pipeline`` compatibility.
Returns
-------
self: object
Returns the instance itself.
"""
self.components_ = None
self.mean_ = None
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
self.var_ = None
self.n_samples_seen_ = 0
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(n_samples, self.batch_size_):
self.partial_fit(X[batch])
return self
def partial_fit(self, X, y=None):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self: object
Returns the instance itself.
"""
X = check_array(X, copy=self.copy, dtype=np.float)
n_samples, n_features = X.shape
if not hasattr(self, 'components_'):
self.components_ = None
if self.n_components is None:
self.n_components_ = n_features
elif not 1 <= self.n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features))
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (self.components_.shape[0]
!= self.n_components_):
raise ValueError("Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value." % (
self.components_.shape[0], self.n_components_))
if self.components_ is None:
# This is the first pass through partial_fit
self.n_samples_seen_ = 0
col_var = X.var(axis=0)
col_mean = X.mean(axis=0)
X -= col_mean
U, S, V = linalg.svd(X, full_matrices=False)
U, V = svd_flip(U, V, u_based_decision=False)
explained_variance = S ** 2 / n_samples
explained_variance_ratio = S ** 2 / np.sum(col_var *
n_samples)
else:
col_batch_mean = X.mean(axis=0)
col_mean, col_var, n_total_samples = _batch_mean_variance_update(
X, self.mean_, self.var_, self.n_samples_seen_)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt((self.n_samples_seen_ * n_samples) /
n_total_samples) * (self.mean_ -
col_batch_mean)
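            # The mean-correction row accounts for the shift between the
            # previously accumulated mean and this batch's mean, following the
            # incremental update of Ross et al. (2008) cited in the Notes.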
X_combined = np.vstack((self.singular_values_.reshape((-1, 1)) *
self.components_, X,
mean_correction))
U, S, V = linalg.svd(X_combined, full_matrices=False)
U, V = svd_flip(U, V, u_based_decision=False)
explained_variance = S ** 2 / n_total_samples
explained_variance_ratio = S ** 2 / np.sum(col_var *
n_total_samples)
self.n_samples_seen_ += n_samples
self.components_ = V[:self.n_components_]
self.singular_values_ = S[:self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[:self.n_components_]
self.explained_variance_ratio_ = \
explained_variance_ratio[:self.n_components_]
if self.n_components_ < n_features:
self.noise_variance_ = \
explained_variance[self.n_components_:].mean()
else:
self.noise_variance_ = 0.
return self
| bsd-3-clause |
supriyantomaftuh/django | tests/staticfiles_tests/test_storage.py | 147 | 18183 | from __future__ import unicode_literals
import os
import sys
import unittest
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .cases import (
BaseCollectionTestCase, BaseStaticFilesTestCase, StaticFilesTestCase,
)
from .settings import TEST_ROOT, TEST_SETTINGS, TESTFILES_PATH
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
class TestHashedFiles(object):
hashed_file_path = hashed_file_path
def tearDown(self):
# Clear hashed files to avoid side effects among tests.
storage.staticfiles_storage.hashed_files.clear()
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.bb84a0240107.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'//foobar', content)
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
def test_template_tag_denorm(self):
relpath = self.hashed_file_path("cached/denorm.css")
self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"..//cached///styles.css", content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
self.assertNotIn(b"url(img/relative.png )", content)
self.assertIn(b'url("img/relative.acae32e4532b.png', content)
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
def test_import_replacement(self):
"See #18050"
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
def test_post_processing(self):
"""
Test that post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
collectstatic has already been called once in setUp() for this testcase,
therefore we check by verifying behavior on a second run.
"""
collectstatic_args = {
'interactive': False,
'verbosity': 0,
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_post_processing_failure(self):
"""
Test that post_processing indicates the origin of the error when it
fails. Regression test for #18986.
"""
finders.get_finder.cache_clear()
err = six.StringIO()
with self.assertRaises(Exception):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def test_cache_invalidation(self):
name = "cached/styles.css"
hashed_name = "cached/styles.bb84a0240107.css"
# check if the cache is filled correctly as expected
cache_key = storage.staticfiles_storage.hash_key(name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(self.hashed_file_path(name), cached_name)
# clearing the cache to make sure we re-set it correctly in the url method
storage.staticfiles_storage.hashed_files.clear()
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, None)
self.assertEqual(self.hashed_file_path(name), hashed_name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, hashed_name)
def test_cache_key_memcache_validation(self):
"""
Handle cache key creation correctly, see #17861.
"""
name = (
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/\x16\xb4"
)
cache_key = storage.staticfiles_storage.hash_key(name)
cache_validator = BaseCache({})
cache_validator.validate_key(cache_key)
self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def setUp(self):
super(TestCollectionManifestStorage, self).setUp()
self._clear_filename = os.path.join(TESTFILES_PATH, 'cleared.txt')
with open(self._clear_filename, 'w') as f:
f.write('to be deleted in one test')
def tearDown(self):
super(TestCollectionManifestStorage, self).tearDown()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn(
'"version": "%s"' % storage.staticfiles_storage.manifest_version,
force_text(manifest_content)
)
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = os.path.join('test', 'cleared.txt')
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
# delete the original file form the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
hashed_file_path = hashed_file_path
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
"""
Used in TestStaticFilePermissions
"""
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o640
kwargs['directory_permissions_mode'] = 0o740
super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):
command_params = {
'interactive': False,
'post_process': True,
'verbosity': 0,
'ignore_patterns': ['*.ignoreme'],
'use_default_ignore_patterns': True,
'clear': False,
'link': False,
'dry_run': False,
}
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
super(TestStaticFilePermissions, self).setUp()
def tearDown(self):
os.umask(self.old_umask)
super(TestStaticFilePermissions, self).tearDown()
# Don't run collectstatic command in this test class.
def run_collectstatic(self, **kwargs):
pass
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
)
def test_collect_static_files_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o655)
self.assertEqual(dir_mode, 0o765)
@override_settings(
FILE_UPLOAD_PERMISSIONS=None,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
)
def test_collect_static_files_default_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o666 & ~self.umask)
self.assertEqual(dir_mode, 0o777 & ~self.umask)
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
STATICFILES_STORAGE='staticfiles_tests.test_storage.CustomStaticFilesStorage',
)
def test_collect_static_files_subclass_of_static_storage(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o640)
self.assertEqual(dir_mode, 0o740)
| bsd-3-clause |
MiltosD/CEFELRC | lib/python2.7/site-packages/django/db/models/sql/constants.py | 394 | 1043 | import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict([(x, None) for x in (
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
)])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
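# e.g. the lookup string 'author__name__iexact' splits on LOOKUP_SEP into
# ['author', 'name', 'iexact'].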
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC')}
| bsd-3-clause |
multigcs/quadfork | Libraries/Mavlink/pymavlink/generator/lib/minixsv/minixsvWrapper.py | 79 | 2400 | #!/usr/local/bin/python
import sys
import getopt
from ..genxmlif import GenXmlIfError
from xsvalErrorHandler import ErrorHandler, XsvalError
from ..minixsv import *
from pyxsval import parseAndValidate
##########################################
# minixsv Wrapper for calling minixsv from command line
validSyntaxText = '''\
minixsv XML Schema Validator
Syntax: minixsv [-h] [-?] [-p Parser] [-s XSD-Filename] XML-Filename
Options:
-h, -?: Display this help text
-p Parser: XML Parser to be used
(XMLIF_MINIDOM, XMLIF_ELEMENTTREE, XMLIF_4DOM
default: XMLIF_ELEMENTTREE)
-s XSD-FileName: specify the schema file for validation
(if not specified in XML-File)
'''
def checkShellInputParameter():
"""check shell input parameters."""
xmlInputFilename = None
xsdFilename = None
xmlParser = "XMLIF_ELEMENTTREE"
try:
(options, arguments) = getopt.getopt(sys.argv[1:], '?hp:s:')
if ('-?','') in options or ('-h','') in options:
print validSyntaxText
sys.exit(-1)
else:
if len (arguments) == 1:
xmlInputFilename = arguments[0]
for o, a in options:
if o == "-s":
xsdFilename = a
if o == "-p":
if a in (XMLIF_MINIDOM, XMLIF_ELEMENTTREE, XMLIF_4DOM):
xmlParser = a
else:
print 'Invalid XML parser %s!' %(a)
sys.exit(-1)
else:
print 'minixsv needs one argument (XML input file)!'
sys.exit(-1)
except getopt.GetoptError, errstr:
print errstr
sys.exit(-1)
return xmlInputFilename, xsdFilename, xmlParser
def main():
xmlInputFilename, xsdFileName, xmlParser = checkShellInputParameter()
try:
parseAndValidate (xmlInputFilename, xsdFile=xsdFileName, xmlIfClass=xmlParser)
except IOError, errstr:
print errstr
sys.exit(-1)
except GenXmlIfError, errstr:
print errstr
sys.exit(-1)
except XsvalError, errstr:
print errstr
sys.exit(-1)
if __name__ == "__main__":
main()
| gpl-3.0 |
aerler/Ensemble | src/ensemble/ensemble_test.py | 1 | 8702 | '''
Created on 2013-08-24
Unittest for the GeoPy main package geodata.
@author: Andre R. Erler, GPL v3
'''
import unittest
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import scipy.stats as ss
import os
import gc
from copy import deepcopy
import shutil
# internal imports
# from ensemble.base import Ensemble
from ensemble.expand import expandArgumentList
## tests related to loading datasets
class ArgumentTest(unittest.TestCase):
def setUp(self):
''' create two test variables '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testExpArgList(self):
''' test function to expand argument lists '''
# test arguments
args1 = [0,1,2]; args2 = ['0','1','2']; args3 = ['test']*3; arg4 = 'static1'; arg5 = 'static2'
explist = ['arg1','arg2','arg3']
# test inner product expansion
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=explist, lproduct='inner')
assert len(arg_list) == len(args1) and len(arg_list) == len(args2)
for args,arg1,arg2,arg3 in zip(arg_list,args1,args2,args3):
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
# test outer product expansion
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=explist, lproduct='outer')
assert len(arg_list) == len(args1) * len(args2) * len(args3)
n = 0
for arg1 in args1:
for arg2 in args2:
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
# test simultaneous inner and outer product expansion
        n1 = len(args2) * len(args3) // len(args1)  # floor division: n1 must be an int
tmp1 = args1*n1
arg_list = expandArgumentList(arg1=tmp1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
outer_list=['arg2','arg3'], inner_list=['arg1'])
assert len(arg_list) == len(args2) * len(args3) == len(tmp1)
n = 0
for arg2 in args2:
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == tmp1[n]
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
# test parallel outer product expansion
assert len(args1) == len(args2) # necessary for test
arg_list = expandArgumentList(arg1=args1, arg2=args2, arg3=args3, arg4=arg4, arg5=arg5,
expand_list=[('arg1','arg2'),'arg3'], lproduct='outer')
assert len(arg_list) == len(args1) * len(args3)
n = 0
for arg1,arg2 in zip(args1,args2):
for arg3 in args3:
args = arg_list[n]
assert args['arg1'] == arg1
assert args['arg2'] == arg2
assert args['arg3'] == arg3
assert args['arg4'] == arg4
assert args['arg5'] == arg5
n += 1
assert n == len(arg_list)
## simple tests for the Container protocol
class ContainerTest(unittest.TestCase):
def setUp(self):
''' create some objects for testing '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testEnsemble(self):
''' simple test for the Ensemble container class '''
# make test objects
test_1 = 'test 1'; test_2 = 'test 2'; test_3 = 'test 3'
# instantiate ensemble
ens = Ensemble(test_1, test_2, name='ensemble', title='Test Ensemble')
# basic functionality
assert len(ens.members) == len(ens) == 2
assert test_1 in ens and test_2 in ens
# collective add/remove
# test adding a new member
ens += test_3 # this is an ensemble operation
assert len(ens) == 3
assert test_3 in ens
# remove
del ens[-1]
assert len(ens) == 2
assert test_3 not in ens
# print representation
print(''); print(ens); print('')
## tests for the method redirect functionality
class MethodTest(unittest.TestCase):
def setUp(self):
''' create Dataset with Axes and a Variables for testing '''
pass
def tearDown(self):
''' clean up '''
gc.collect()
def testEnsemble(self):
''' test the Ensemble container class '''
# test object
dataset = self.dataset
dataset.load()
# make a copy
copy = dataset.copy()
copy.name = 'copy of {}'.format(dataset.name)
yacod = dataset.copy()
yacod.name = 'yacod' # used later
# instantiate ensemble
ens = Ensemble(dataset, copy, name='ensemble', title='Test Ensemble', basetype='Dataset')
# basic functionality
assert len(ens.members) == len(ens)
# these var/ax names are specific to the test dataset...
if all(ens.hasVariable('var')):
assert isinstance(ens.var,Ensemble)
assert ens.var.basetype == Variable
#assert ens.var == Ensemble(dataset.var, copy.var, basetype=Variable, idkey='dataset_name')
assert ens.var.members == [dataset.var, copy.var]
#print ens.var
#print Ensemble(dataset.var, copy.var, basetype=Variable, idkey='dataset_name')
#print(''); print(ens); print('')
#print ens.time
assert ens.time == [dataset.time , copy.time]
# Axis ensembles are not supported anymore, since they are often shared.
#assert isinstance(ens.time,Ensemble) and ens.time.basetype == Variable
# collective add/remove
ax = Axis(name='ax', units='none', coord=(1,10))
var1 = Variable(name='new',units='none',axes=(ax,))
var2 = Variable(name='new',units='none',axes=(ax,))
ens.addVariable([var1,var2], copy=False) # this is a dataset operation
assert ens[0].hasVariable(var1)
assert ens[1].hasVariable(var2)
assert all(ens.hasVariable('new'))
# test adding a new member
ens += yacod # this is an ensemble operation
#print(''); print(ens); print('')
ens -= 'new' # this is a dataset operation
assert not any(ens.hasVariable('new'))
ens -= 'test'
# fancy test of Variable and Dataset integration
assert not any(ens[self.var.name].mean(axis='time').hasAxis('time'))
print((ens.prettyPrint(short=True)))
# apply function to dataset ensemble
if all(ax.units == 'month' for ax in ens.time):
maxens = ens.seasonalMax(lstrict=not lsimple); del maxens
# test call
tes = ens(time=slice(0,3,2))
assert all(len(tax)==2 for tax in tes.time)
# test list indexing
sne = ens[list(range(len(ens)-1,-1,-1))]
assert sne[-1] == ens[0] and sne[0] == ens[-1]
if __name__ == "__main__":
# use Intel MKL multithreading: OMP_NUM_THREADS=4
# import os
print(('OMP_NUM_THREADS = {:s}\n'.format(os.environ['OMP_NUM_THREADS'])))
specific_tests = []
# specific_tests += ['Ensemble']
# list of tests to be performed
tests = []
# list of Container tests
tests += ['Argument']
# list of Container tests
# tests += ['Container']
# list of Method tests
# tests += ['Method']
# construct dictionary of test classes defined above
test_classes = dict()
local_values = locals().copy()
for key,val in local_values.items():
if key[-4:] == 'Test':
test_classes[key[:-4]] = val
# run tests
report = []
for test in tests: # test+'.test'+specific_test
if specific_tests:
test_names = ['ensemble_test.'+test+'Test.test'+s_t for s_t in specific_tests]
s = unittest.TestLoader().loadTestsFromNames(test_names)
else: s = unittest.TestLoader().loadTestsFromTestCase(test_classes[test])
report.append(unittest.TextTestRunner(verbosity=2).run(s))
# print summary
runs = 0; errs = 0; fails = 0
for name,test in zip(tests,report):
#print test, dir(test)
runs += test.testsRun
e = len(test.errors)
errs += e
f = len(test.failures)
fails += f
        if e + f != 0: print(("\nErrors in '{:s}' Tests: {:s}".format(name,str(test))))
if errs + fails == 0:
print(("\n *** All {:d} Test(s) successfull!!! *** \n".format(runs)))
else:
print(("\n ### Test Summary: ### \n" +
" ### Ran {:2d} Test(s) ### \n".format(runs) +
" ### {:2d} Failure(s) ### \n".format(fails)+
" ### {:2d} Error(s) ### \n".format(errs)))
| gpl-3.0 |
mwcraig/ccdproc | ccdproc/tests/test_cosmicray.py | 2 | 10917 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose
import pytest
from astropy.utils import NumpyRNGContext
from astropy.nddata import StdDevUncertainty
from astropy import units as u
from ccdproc.core import (cosmicray_lacosmic, cosmicray_median,
background_deviation_box, background_deviation_filter)
from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func
DATA_SCALE = 5.3
NCRAYS = 30
def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
size = data.shape[0]
with NumpyRNGContext(125):
crrays = np.random.randint(0, size, size=(ncrays, 2))
# use (threshold + 1) below to make sure cosmic ray is well above the
# threshold no matter what the random number generator returns
crflux = (10 * scale * np.random.random(NCRAYS) +
(threshold + 5) * scale)
for i in range(ncrays):
y, x = crrays[i]
data.data[y, x] = crflux[i]
def test_cosmicray_lacosmic():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
data, crarr = cosmicray_lacosmic(ccd_data.data, sigclip=5)
# check the number of cosmic rays detected
# currently commented out while checking on issues
# in astroscrappy
# assert crarr.sum() == NCRAYS
def test_cosmicray_lacosmic_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5)
# check the number of cosmic rays detected
# currently commented out while checking on issues
# in astroscrappy
# assert nccd_data.mask.sum() == NCRAYS
def test_cosmicray_lacosmic_check_data():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
noise = DATA_SCALE * np.ones_like(ccd_data.data)
cosmicray_lacosmic(10, noise)
@pytest.mark.parametrize('array_input', [True, False])
@pytest.mark.parametrize('gain_correct_data', [True, False])
def test_cosmicray_gain_correct(array_input, gain_correct_data):
# Add regression check for #705 and for the new gain_correct
# argument.
# The issue is that cosmicray_lacosmic gain-corrects the
# data and returns that gain corrected data. That is not the
# intent...
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
# Don't really need to set this (6.5 is the default value) but want to
# make lack of units explicit.
readnoise = 6.5
if array_input:
new_data, cr_mask = cosmicray_lacosmic(ccd_data.data,
gain=gain,
gain_apply=gain_correct_data)
else:
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=gain_correct_data)
new_data = new_ccd.data
cr_mask = new_ccd.mask
# Fill masked locations with 0 since there is no simple relationship
# between the original value and the corrected value.
orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0)
new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0)
if gain_correct_data:
gain_for_test = gain
else:
gain_for_test = 1.0
np.testing.assert_allclose(gain_for_test * orig_data, new_data)
def test_cosmicray_lacosmic_accepts_quantity_gain():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# The units below are the point of the test
gain = 2.0 * u.electron / u.adu
# Since gain and ccd_data have units, the readnoise should too.
readnoise = 6.5 * u.electron
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True)
def test_cosmicray_lacosmic_accepts_quantity_readnoise():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
gain = 2.0 * u.electron / u.adu
# The units below are the point of this test
readnoise = 6.5 * u.electron
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
def test_cosmicray_lacosmic_detects_inconsistent_units():
# This is intended to detect cases like a ccd with units
# of adu, a readnoise in electrons and a gain in adu / electron.
# That is not internally inconsistent.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
ccd_data.unit = 'adu'
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
readnoise = 6.5 * u.electron
# The units below are deliberately incorrect.
gain = 2.0 * u.adu / u.electron
with pytest.raises(ValueError) as e:
cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
assert 'Inconsistent units' in str(e.value)
def test_cosmicray_lacosmic_warns_on_ccd_in_electrons(recwarn):
# Check that an input ccd in electrons raises a warning.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
# The unit below is important for the test; this unit on
# input is supposed to raise an error.
ccd_data.unit = u.electron
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
# Don't really need to set this (6.5 is the default value) but want to
# make lack of units explicit.
readnoise = 6.5
new_ccd = cosmicray_lacosmic(ccd_data,
gain=gain,
gain_apply=True,
readnoise=readnoise)
assert "Image unit is electron" in str(recwarn.pop())
def test_cosmicray_median_check_data():
with pytest.raises(TypeError):
ndata, crarr = cosmicray_median(10, thresh=5, mbox=11,
error_image=DATA_SCALE)
def test_cosmicray_median():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ndata, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image=DATA_SCALE)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ccd_data.uncertainty = ccd_data.data*0.0+DATA_SCALE
nccd = cosmicray_median(ccd_data, thresh=5, mbox=11,
error_image=None)
# check the number of cosmic rays detected
assert nccd.mask.sum() == NCRAYS
def test_cosmicray_median_masked():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6))
ndata, crarr = cosmicray_median(data, thresh=5, mbox=11,
error_image=DATA_SCALE)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_background_None():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image=None)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_gbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data*0.0+DATA_SCALE
data, crarr = cosmicray_median(ccd_data.data, error_image=error,
thresh=5, mbox=11, rbox=0, gbox=5)
data = np.ma.masked_array(data, crarr)
assert crarr.sum() > NCRAYS
assert abs(data.std() - scale) < 0.1
def test_cosmicray_median_rbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data*0.0+DATA_SCALE
data, crarr = cosmicray_median(ccd_data.data, error_image=error,
thresh=5, mbox=11, rbox=21, gbox=5)
assert data[crarr].mean() < ccd_data.data[crarr].mean()
assert crarr.sum() > NCRAYS
def test_cosmicray_median_background_deviation():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
cosmicray_median(ccd_data.data, thresh=5, mbox=11,
error_image='blank')
def test_background_deviation_box():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_box(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_box_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_box(cd, 0.5)
def test_background_deviation_filter():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_filter(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_filter_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_filter(cd, 0.5)
| bsd-3-clause |
transientskp/aartfaac-arthur | scripts/arthur-plot.py | 1 | 1440 | #!/usr/bin/env python3
import sys
import numpy as np
from arthur.imaging import full_calculation, calculate_lag
from arthur.io import read_full
from arthur.plot import plot_image, plot_lag, plot_chan_power, plot_corr_mat, plot_diff
from arthur.constants import NUM_CHAN
from matplotlib import pyplot
FRQ = 58398437.5 # Central observation frequency in Hz
def main():
if len(sys.argv) < 2:
print("Image the first set of visibilites from a visibilities file")
print()
print("usage: {} <file>".format(sys.argv[0]))
sys.exit(1)
else:
path = sys.argv[1]
# define them here so we can access them out of for loop scope
lags = []
prev_data = date = img_data = corr_data = diff_data = None
chan_data = np.zeros((NUM_CHAN, 60), dtype=np.float32)
for date, body in read_full(path):
img_data, corr_data, chan_row = full_calculation(body, FRQ)
lags += [calculate_lag(date).seconds]
if prev_data is None:
prev_data = img_data
chan_data = np.roll(chan_data, 1)
chan_data[:, 0] = chan_row
diff_data = img_data - prev_data
prev_data = img_data
fig_img = plot_image(date, img_data, FRQ)
fig_lag = plot_lag(lags)
fig_chan = plot_chan_power(chan_data)
fig_cm = plot_corr_mat(corr_data, FRQ, date)
fig_diff = plot_diff(diff_data, FRQ, date)
pyplot.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
seiji56/rmaze-2016 | testes/sharp.py | 1 | 1113 | import time
import Adafruit_ADS1x15
import sys
addr = 0
def convert(aString):
    # Interpret the string as hex ("0x"/"0X" prefix), octal (leading "0"),
    # or decimal.
if aString.startswith("0x") or aString.startswith("0X"):
return int(aString,16)
elif aString.startswith("0"):
return int(aString,8)
else:
return int(aString)
milli_time = lambda: int(round(time.time() * 1000))
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' <address> <port>')
exit(0)
addr = convert(sys.argv[1])
port = convert(sys.argv[2])
it = 1
if len(sys.argv) == 4:
it = convert(sys.argv[3])
adc = Adafruit_ADS1x15.ADS1015(address=addr, busnum=1)
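# Gain of 1 corresponds to a +/-4.096V input range on the ADS1x15
# (see the Adafruit_ADS1x15 documentation).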
GAIN = 1
print('Reading port ' + str(port) + 'ADS1x15 at ' + hex(addr) + ' values, press Ctrl-C to quit...')
print('| {0:^6} | {1:^6} |'.format(*([port] + ['Time'])))
#print('-' * 46)
while True:
value = 0
ltime = milli_time()
try:
for i in range(it):
value += adc.read_adc(port, gain=GAIN)
except IOError:
print('Could not read sensor.')
exit(-1)
value /= it
print('| {0:^6} | {1:^6} |'.format(*([value] + [milli_time() - ltime])))
time.sleep(0.5)
| gpl-3.0 |
mbr/unleash | tests/test_depgraph.py | 1 | 1538 | import pytest
from unleash.depgraph import DependencyGraph
@pytest.fixture
def dg():
# our example dependency graph. it looks like this
#
# D -> B
# \
# A E -> F
# /
# C
g = DependencyGraph()
g.add_obj('D', ['B'])
g.add_obj('B', ['A'])
g.add_obj('C', ['A'])
g.add_obj('E', ['F'])
return g
def test_get_full_dependencies(dg):
assert dg.get_full_dependencies('D') == {'B', 'A'}
def test_get_full_dependants(dg):
assert dg.get_full_dependants('A') == {'B', 'C', 'D'}
assert dg.get_full_dependants('F') == {'E'}
def test_get_dependants(dg):
assert set(dg.get_dependants('A')) == {'B', 'C'}
def test_get_dependencies(dg):
assert dg.get_dependencies('B') == ['A']
assert dg.get_dependencies('D') == ['B']
assert dg.get_dependencies('E') == ['F']
def test_remove_obj(dg):
dg.remove_obj('A')
assert dg.get_dependencies('B') == []
def test_remove_dependency(dg):
dg.remove_dependency('C', 'A')
assert dg.get_full_dependants('A') == {'B', 'D'}
def test_resolve_order(dg):
ordered = dg.resolve_order()
a = ordered.index('A')
b = ordered.index('B')
c = ordered.index('C')
d = ordered.index('D')
e = ordered.index('E')
f = ordered.index('F')
assert d > b
assert b > a
assert c > a
assert e > f
def test_dag_enforced(dg):
with pytest.raises(ValueError):
dg.add_obj('A', ['B'])
with pytest.raises(ValueError):
dg.add_dependency('A', 'B')
| mit |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/tests/unit/test_addons.py | 1 | 1985 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from marionette import MarionetteTestCase
from marionette_driver.addons import Addons, AddonInstallException
here = os.path.abspath(os.path.dirname(__file__))
class TestAddons(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.addons = Addons(self.marionette)
@property
def all_addon_ids(self):
with self.marionette.using_context('chrome'):
addons = self.marionette.execute_async_script("""
Components.utils.import("resource://gre/modules/AddonManager.jsm");
AddonManager.getAllAddons(function(addons){
let ids = addons.map(function(x) {
return x.id;
});
marionetteScriptFinished(ids);
});
""")
return addons
def test_install_and_remove_temporary_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
addon_id = self.addons.install(addon_path, temp=True)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
def test_install_unsigned_addon(self):
addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
with self.assertRaises(AddonInstallException):
self.addons.install(addon_path)
@unittest.skip("need to get the test extension signed")
def test_install_and_remove_signed_addon(self):
addon_path = os.path.join(here, 'mn-restartless-signed.xpi')
addon_id = self.addons.install(addon_path)
self.assertIn(addon_id, self.all_addon_ids)
self.addons.uninstall(addon_id)
self.assertNotIn(addon_id, self.all_addon_ids)
| mpl-2.0 |
jimbobhickville/libcloud | libcloud/test/compute/test_vcloud.py | 33 | 32420 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib, b
from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject
from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess
from libcloud.compute.drivers.vcloud import VCloud_5_1_NodeDriver
from libcloud.compute.drivers.vcloud import Vdc
from libcloud.compute.base import Node, NodeImage
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import VCLOUD_PARAMS
class TerremarkTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = "test"
VCloudNodeDriver.connectionCls.conn_classes = (None, TerremarkMockHttp)
TerremarkMockHttp.type = None
self.driver = TerremarkDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5')
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, [])
self.assertEqual(node.private_ips, ['10.112.78.69'])
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
class VCloud_1_5_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_classes = (
None, VCloud_1_5_MockHttp)
VCloud_1_5_MockHttp.type = None
self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_networks(self):
ret = self.driver.networks
self.assertEqual(
ret[0].get('href'), 'https://vm-vcloud/api/network/dca8b667-6c8f-4c3e-be57-7a9425dba4f4')
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testNode',
image=image,
size=size,
ex_vdc='MyVdc',
ex_network='vCloud - Default',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_create_node_clone(self):
image = self.driver.list_nodes()[0]
node = self.driver.create_node(name='testNode', image=image)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a')
self.assertEqual(node.name, 'testNode')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['65.41.67.2'])
self.assertEqual(node.private_ips, ['65.41.67.2'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e045',
'name': 'testVm',
'state': NodeState.RUNNING,
'public_ips': ['65.41.67.2'],
'private_ips': ['65.41.67.2'],
'os_type': 'rhel5_64Guest'
}]})
node = ret[1]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b')
self.assertEqual(node.name, 'testNode2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['192.168.0.103'])
self.assertEqual(node.private_ips, ['192.168.0.100'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e046',
'name': 'testVm2',
'state': NodeState.RUNNING,
'public_ips': ['192.168.0.103'],
'private_ips': ['192.168.0.100'],
'os_type': 'rhel5_64Guest'
}]})
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_validate_vm_names(self):
# valid inputs
self.driver._validate_vm_names(['host-n-ame-name'])
self.driver._validate_vm_names(['tc-mybuild-b1'])
self.driver._validate_vm_names(None)
# invalid inputs
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['invalid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['inv-alid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['hostnametoooolong'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['host$name'])
def test_change_vm_names(self):
self.driver._change_vm_names(
'/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', ['changed1', 'changed2'])
def test_is_node(self):
self.assertTrue(self.driver._is_node(
Node('testId', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver)))
self.assertFalse(self.driver._is_node(
NodeImage('testId', 'testNode', driver=self.driver)))
def test_ex_undeploy(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployTest', 'testNode', state=0,
public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_undeploy_with_error(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployErrorTest', 'testNode',
state=0, public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_find_node(self):
node = self.driver.ex_find_node('testNode')
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNode', self.driver.vdcs[0])
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNonExisting', self.driver.vdcs[0])
self.assertEqual(node, None)
def test_ex_add_vm_disk__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', 'invalid value')
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', '-1')
def test_ex_add_vm_disk(self):
self.driver.ex_add_vm_disk('https://test/api/vApp/vm-test', '20')
def test_ex_set_vm_cpu__with_invalid_values(self):
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', 50)
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', -1)
def test_ex_set_vm_cpu(self):
self.driver.ex_set_vm_cpu('https://test/api/vApp/vm-test', 4)
def test_ex_set_vm_memory__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', 777)
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', -1024)
def test_ex_set_vm_memory(self):
self.driver.ex_set_vm_memory('https://test/api/vApp/vm-test', 1024)
def test_vdcs(self):
vdcs = self.driver.vdcs
self.assertEqual(len(vdcs), 1)
self.assertEqual(
vdcs[0].id, 'https://vm-vcloud/api/vdc/3d9ae28c-1de9-4307-8107-9356ff8ba6d0')
self.assertEqual(vdcs[0].name, 'MyVdc')
self.assertEqual(vdcs[0].allocation_model, 'AllocationPool')
self.assertEqual(vdcs[0].storage.limit, 5120000)
self.assertEqual(vdcs[0].storage.used, 1984512)
self.assertEqual(vdcs[0].storage.units, 'MB')
self.assertEqual(vdcs[0].cpu.limit, 160000)
self.assertEqual(vdcs[0].cpu.used, 0)
self.assertEqual(vdcs[0].cpu.units, 'MHz')
self.assertEqual(vdcs[0].memory.limit, 527360)
self.assertEqual(vdcs[0].memory.used, 130752)
self.assertEqual(vdcs[0].memory.units, 'MB')
def test_ex_list_nodes(self):
self.assertEqual(
len(self.driver.ex_list_nodes()), len(self.driver.list_nodes()))
def test_ex_list_nodes__masked_exception(self):
"""
Test that we don't mask other exceptions.
"""
brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver)
self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc))
def test_ex_power_off(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_power_off_node(node)
def test_ex_query(self):
results = self.driver.ex_query(
'user', filter='name==jrambo', page=2, page_size=30, sort_desc='startDate')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['type'], 'UserRecord')
self.assertEqual(results[0]['name'], 'jrambo')
self.assertEqual(results[0]['isLdapUser'], 'true')
def test_ex_get_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = self.driver.ex_get_control_access(node)
self.assertEqual(
control_access.everyone_access_level, ControlAccess.AccessLevel.READ_ONLY)
self.assertEqual(len(control_access.subjects), 1)
self.assertEqual(control_access.subjects[0].type, 'group')
self.assertEqual(control_access.subjects[0].name, 'MyGroup')
self.assertEqual(control_access.subjects[
0].id, 'https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413')
self.assertEqual(control_access.subjects[
0].access_level, ControlAccess.AccessLevel.FULL_CONTROL)
def test_ex_set_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = ControlAccess(node, None, [Subject(
name='MyGroup',
type='group',
access_level=ControlAccess.AccessLevel.FULL_CONTROL)])
self.driver.ex_set_control_access(node, control_access)
def test_ex_get_metadata(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
metadata = self.driver.ex_get_metadata(node)
self.assertEqual(metadata, {'owners': '[email protected]'})
def test_ex_set_metadata_entry(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_set_metadata_entry(node, 'foo', 'bar')
class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_classes = (
None, VCloud_1_5_MockHttp)
VCloud_1_5_MockHttp.type = None
self.driver = VCloudNodeDriver(
*VCLOUD_PARAMS, **{'api_version': '5.1'})
self.assertTrue(isinstance(self.driver, VCloud_5_1_NodeDriver))
def _test_create_node_valid_ex_vm_memory(self):
# TODO: Hook up the fixture
values = [4, 1024, 4096]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
def test_create_node_invalid_ex_vm_memory(self):
values = [1, 3, 7]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
try:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
except ValueError:
pass
else:
self.fail('Exception was not thrown')
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
class TerremarkMockHttp(MockHttp):
fixtures = ComputeFileFixtures('terremark')
def _api_v0_8_login(self, method, url, body, headers):
headers['set-cookie'] = 'vcloud-token=testtoken'
body = self.fixtures.load('api_v0_8_login.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_org_240(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_org_240.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_catalog(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_catalogItem_5(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_catalogItem_5.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vdc_224_action_instantiateVAppTemplate.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_10496(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_10496.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_powerOn.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('api_v0_8_vapp_14031_get.xml')
elif method == 'DELETE':
body = ''
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_poweroff.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_11001(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_11001.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
class AnotherErrorMember(Exception):
"""
helper class for the synthetic exception
"""
def __init__(self):
self.tag = 'Error'
def get(self, foo):
return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN_1'
class AnotherError(Exception):
pass
class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase):
fixtures = ComputeFileFixtures('vcloud_1_5')
def request(self, method, url, body=None, headers=None, raw=False):
        self.assertTrue(url.startswith('/api/'),
                        '"%s" is invalid. Needs to '
                        'start with "/api". The passed URL should be just '
                        'the path, not the full URL.' % url)
super(VCloud_1_5_MockHttp, self).request(method, url, body, headers,
raw)
def _api_sessions(self, method, url, body, headers):
headers['x-vcloud-authorization'] = 'testtoken'
body = self.fixtures.load('api_sessions.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org(self, method, url, body, headers):
body = self.fixtures.load('api_org.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a(self, method, url, body, headers):
body = self.fixtures.load(
'api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4(self, method, url, body, headers):
body = self.fixtures.load(
'api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_brokenVdc(self, method, url, body, headers):
body = self.fixtures.load('api_vdc_brokenVdc.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_errorRaiser(self, method, url, body, headers):
m = AnotherErrorMember()
raise AnotherError(m)
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
# Clone
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_networkConnectionSection(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a(self, method, url, body, headers):
status = httplib.OK
if method == 'GET':
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml')
status = httplib.OK
elif method == 'DELETE':
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers):
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_task_b034df55_fe81_4798_bc81_1f0fd0ead450(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalogItem_3132e037_759b_4627_9056_ca66466fa607(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployTest_action_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest_action_undeploy(self, method, url, body, headers):
if b('shutdown') in b(body):
body = self.fixtures.load('api_task_undeploy_error.xml')
else:
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeployError(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy_error.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_access_to_resource_forbidden(self, method, url, body, headers):
raise Exception(
ET.fromstring(self.fixtures.load('api_vApp_vapp_access_to_resource_forbidden.xml')))
def _api_vApp_vm_test(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_vm_test.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_test_virtualHardwareSection_disks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_cpu(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_memory(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_powerOff(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_query(self, method, url, body, headers):
assert method == 'GET'
if 'type=user' in url:
self.assertTrue('page=2' in url)
self.assertTrue('filter=(name==jrambo)' in url)
            self.assertTrue('sortDesc=startDate' in url)
body = self.fixtures.load('api_query_user.xml')
elif 'type=group' in url:
body = self.fixtures.load('api_query_group.xml')
else:
raise AssertionError('Unexpected query type')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('api_vapp_post_metadata.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
else:
body = self.fixtures.load('api_vapp_get_metadata.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_controlAccess(self, method, url, body, headers):
body = str(body)
self.assertTrue(method == 'POST')
self.assertTrue(
'<IsSharedToEveryone>false</IsSharedToEveryone>' in body)
self.assertTrue(
'<Subject href="https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413" />' in body)
self.assertTrue('<AccessLevel>FullControl</AccessLevel>' in body)
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413(self, method, url, body, headers):
body = self.fixtures.load(
'api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
nikushx/AlexaUrbanDictionaryWOTD | libraries/requests/packages/urllib3/util/retry.py | 198 | 9981 | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
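    # Illustrative note (added for clarity, not in the upstream source): with
    # backoff_factor=0.1 the successive return values above are 0 for the
    # first observed error, then 0.2, 0.4, 0.8, ... seconds, never exceeding
    # BACKOFF_MAX.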
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
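# Hedged usage sketch (not part of the upstream module): ties together the
# docstring examples above. It assumes the vendored ``PoolManager`` lives in
# the parent package and that http://example.com/ is just a placeholder URL.
def _example_retry_usage():  # pragma: no cover - illustrative only
    from ..poolmanager import PoolManager  # assumed relative location
    # Allow 5 connect and 2 read retries; between attempts urllib3 sleeps
    # backoff_factor * 2 ** (observed_errors - 1) seconds (zero until more
    # than one error has been observed), capped at Retry.BACKOFF_MAX, exactly
    # as get_backoff_time() above computes.
    retries = Retry(connect=5, read=2, redirect=5, backoff_factor=0.1)
    http = PoolManager(retries=retries)
    return http.request('GET', 'http://example.com/')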
| gpl-2.0 |
milinbhakta/flaskjinja | flask1/Lib/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
"""
Override the default bdist_rpm behavior to do the following:
1. Run egg_info to ensure the name and version are properly calculated.
2. Always run 'install' using --single-version-externally-managed to
disable eggs in RPM distributions.
3. Replace dash with underscore in the version numbers for better RPM
compatibility.
"""
def run(self):
# ensure distro name is up-to-date
self.run_command('egg_info')
orig.bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-', '_')
spec = orig.bdist_rpm._make_spec_file(self)
line23 = '%define version ' + version
line24 = '%define version ' + rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23, line24)
for line in spec
]
insert_loc = spec.index(line24) + 1
unmangled_version = "%define unmangled_version " + version
spec.insert(insert_loc, unmangled_version)
return spec
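        # Illustrative note (added for clarity, not in the upstream source):
        # for a hypothetical version "1.0-2" the rewrite above yields
        #     %define version 1.0_2
        #     %define unmangled_version 1.0-2
        # and the Source0/%setup lines now reference %{unmangled_version}, so
        # the RPM version is dash-free while the tarball name stays untouched.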
| gpl-2.0 |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx_collection/plugins/modules/tower_credential.py | 1 | 13502 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright: (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_credential
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower credential.
description:
- Create, update, or destroy Ansible Tower credentials. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the credential.
required: True
type: str
new_name:
description:
        - Setting this option will change the existing name (looked up via the name field).
required: False
type: str
description:
description:
- The description to use for the credential.
type: str
organization:
description:
- Organization that should own the credential.
type: str
credential_type:
description:
- Name of credential type.
- Will be preferred over kind
type: str
inputs:
description:
- >-
Credential inputs where the keys are var names used in templating.
Refer to the Ansible Tower documentation for example syntax.
        - Any fields in this dict will take precedence over any fields mentioned below (e.g. host, username, etc.)
type: dict
user:
description:
- User that should own this credential.
type: str
team:
description:
- Team that should own this credential.
type: str
kind:
description:
- Type of credential being added.
- The ssh choice refers to a Tower Machine credential.
- Deprecated, please use credential_type
required: False
type: str
choices: ["ssh", "vault", "net", "scm", "aws", "vmware", "satellite6", "cloudforms", "gce", "azure_rm", "openstack", "rhv", "insights", "tower"]
host:
description:
- Host for this credential.
- Deprecated, will be removed in a future release
type: str
username:
description:
- Username for this credential. ``access_key`` for AWS.
- Deprecated, please use inputs
type: str
password:
description:
- Password for this credential. ``secret_key`` for AWS. ``api_key`` for RAX.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
project:
description:
- Project that should use this credential for GCP.
- Deprecated, will be removed in a future release
type: str
ssh_key_data:
description:
- SSH private key content. To extract the content from a file path, use the lookup function (see examples).
- Deprecated, please use inputs
type: str
ssh_key_unlock:
description:
- Unlock password for ssh_key.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
authorize:
description:
- Should use authorize for net type.
- Deprecated, please use inputs
type: bool
default: 'no'
authorize_password:
description:
- Password for net credentials that require authorize.
- Deprecated, please use inputs
type: str
client:
description:
- Client or application ID for azure_rm type.
- Deprecated, please use inputs
type: str
security_token:
description:
- STS token for aws type.
- Deprecated, please use inputs
type: str
secret:
description:
- Secret token for azure_rm type.
- Deprecated, please use inputs
type: str
subscription:
description:
- Subscription ID for azure_rm type.
- Deprecated, please use inputs
type: str
tenant:
description:
- Tenant ID for azure_rm type.
- Deprecated, please use inputs
type: str
domain:
description:
- Domain for openstack type.
- Deprecated, please use inputs
type: str
become_method:
description:
- Become method to use for privilege escalation.
- Some examples are "None", "sudo", "su", "pbrun"
- Due to become plugins, these can be arbitrary
- Deprecated, please use inputs
type: str
become_username:
description:
- Become username.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
become_password:
description:
- Become password.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
vault_password:
description:
- Vault password.
- Use "ASK" and launch in Tower to be prompted.
- Deprecated, please use inputs
type: str
vault_id:
description:
- Vault identifier.
- This parameter is only valid if C(kind) is specified as C(vault).
- Deprecated, please use inputs
type: str
state:
description:
- Desired state of the resource.
choices: ["present", "absent"]
default: "present"
type: str
extends_documentation_fragment: awx.awx.auth
notes:
  - The `inputs` value and the other deprecated fields (such as `tenant`) replace the credential's existing values rather than merging with them.
See the last 4 examples for details.
'''
EXAMPLES = '''
- name: Add tower machine credential
tower_credential:
name: Team Name
description: Team Description
organization: test-org
credential_type: Machine
state: present
tower_config_file: "~/tower_cli.cfg"
- name: Create a valid SCM credential from a private_key file
tower_credential:
name: SCM Credential
organization: Default
state: present
credential_type: Source Control
inputs:
username: joe
password: secret
ssh_key_data: "{{ lookup('file', '/tmp/id_rsa') }}"
ssh_key_unlock: "passphrase"
- name: Fetch private key
slurp:
src: '$HOME/.ssh/aws-private.pem'
register: aws_ssh_key
- name: Add Credential Into Tower
tower_credential:
name: Workshop Credential
credential_type: Machine
organization: Default
inputs:
ssh_key_data: "{{ aws_ssh_key['content'] | b64decode }}"
run_once: true
delegate_to: localhost
- name: Add Credential with Custom Credential Type
tower_credential:
name: Workshop Credential
credential_type: MyCloudCredential
organization: Default
tower_username: admin
tower_password: ansible
tower_host: https://localhost
- name: Create a Vault credential (example for notes)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'hello'
vault_id: 'My ID'
- name: Bad password update (will replace vault_id)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'new_password'
- name: Another bad password update (will replace vault_id)
tower_credential:
name: Example password
credential_type: Vault
organization: Default
vault_password: 'new_password'
- name: A safe way to update a password and keep vault_id
tower_credential:
name: Example password
credential_type: Vault
organization: Default
inputs:
vault_password: 'new_password'
vault_id: 'My ID'
'''
from ..module_utils.tower_api import TowerAPIModule
KIND_CHOICES = {
'ssh': 'Machine',
'vault': 'Vault',
'net': 'Network',
'scm': 'Source Control',
'aws': 'Amazon Web Services',
'vmware': 'VMware vCenter',
'satellite6': 'Red Hat Satellite 6',
'cloudforms': 'Red Hat CloudForms',
'gce': 'Google Compute Engine',
'azure_rm': 'Microsoft Azure Resource Manager',
'openstack': 'OpenStack',
'rhv': 'Red Hat Virtualization',
'insights': 'Insights',
'tower': 'Ansible Tower',
}
OLD_INPUT_NAMES = (
'authorize', 'authorize_password', 'client',
'security_token', 'secret', 'tenant', 'subscription',
'domain', 'become_method', 'become_username',
'become_password', 'vault_password', 'project', 'host',
'username', 'password', 'ssh_key_data', 'vault_id',
'ssh_key_unlock'
)
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
name=dict(required=True),
new_name=dict(),
description=dict(),
organization=dict(),
credential_type=dict(),
inputs=dict(type='dict', no_log=True),
user=dict(),
team=dict(),
        # These are for backwards compatibility
kind=dict(choices=list(KIND_CHOICES.keys())),
host=dict(),
username=dict(),
password=dict(no_log=True),
project=dict(),
ssh_key_data=dict(no_log=True),
ssh_key_unlock=dict(no_log=True),
authorize=dict(type='bool'),
authorize_password=dict(no_log=True),
client=dict(),
security_token=dict(),
secret=dict(no_log=True),
subscription=dict(),
tenant=dict(),
domain=dict(),
become_method=dict(),
become_username=dict(),
become_password=dict(no_log=True),
vault_password=dict(no_log=True),
vault_id=dict(),
        # End backwards compatibility
state=dict(choices=['present', 'absent'], default='present'),
)
# Create a module for ourselves
module = TowerAPIModule(argument_spec=argument_spec, required_one_of=[['kind', 'credential_type']])
# Extract our parameters
name = module.params.get('name')
new_name = module.params.get('new_name')
description = module.params.get('description')
organization = module.params.get('organization')
credential_type = module.params.get('credential_type')
inputs = module.params.get('inputs')
user = module.params.get('user')
team = module.params.get('team')
# The legacy arguments are put into a hash down below
kind = module.params.get('kind')
    # End backwards compatibility
state = module.params.get('state')
    # Deprecation warnings
for legacy_input in OLD_INPUT_NAMES:
if module.params.get(legacy_input) is not None:
module.deprecate(msg='{0} parameter has been deprecated, please use inputs instead'.format(legacy_input), version="ansible.tower:4.0.0")
if kind:
module.deprecate(msg='The kind parameter has been deprecated, please use credential_type instead', version="ansible.tower:4.0.0")
cred_type_id = module.resolve_name_to_id('credential_types', credential_type if credential_type else KIND_CHOICES[kind])
if organization:
org_id = module.resolve_name_to_id('organizations', organization)
# Attempt to look up the object based on the provided name, credential type and optional organization
lookup_data = {
'name': name,
'credential_type': cred_type_id,
}
if organization:
lookup_data['organization'] = org_id
credential = module.get_one('credentials', **{'data': lookup_data})
if state == 'absent':
# If the state was absent we can let the module delete it if needed, the module will handle exiting from this
module.delete_if_needed(credential)
# Attempt to look up the related items the user specified (these will fail the module if not found)
if user:
user_id = module.resolve_name_to_id('users', user)
if team:
team_id = module.resolve_name_to_id('teams', team)
# Create credential input from legacy inputs
has_inputs = False
credential_inputs = {}
for legacy_input in OLD_INPUT_NAMES:
if module.params.get(legacy_input) is not None:
has_inputs = True
credential_inputs[legacy_input] = module.params.get(legacy_input)
if inputs:
has_inputs = True
credential_inputs.update(inputs)
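    # Note (added for clarity, not in the original module): the legacy
    # top-level parameters are collected first and ``inputs`` is applied last
    # via dict.update(), so keys supplied in ``inputs`` override the
    # deprecated per-field arguments, matching the precedence documented
    # above.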
# Create the data that gets sent for create and update
credential_fields = {
'name': new_name if new_name else name,
'credential_type': cred_type_id,
}
if has_inputs:
credential_fields['inputs'] = credential_inputs
if description:
credential_fields['description'] = description
if organization:
credential_fields['organization'] = org_id
# If we don't already have a credential (and we are creating one) we can add user/team
# The API does not appear to do anything with these after creation anyway
# NOTE: We can't just add these on a modification because they are never returned from a GET so it would always cause a changed=True
if not credential:
if user:
credential_fields['user'] = user_id
if team:
credential_fields['team'] = team_id
    # If the state was present we can let the module build or update the existing credential; this will return on its own
module.create_or_update_if_needed(
credential, credential_fields, endpoint='credentials', item_type='credential'
)
if __name__ == '__main__':
main()
| apache-2.0 |
paweljasinski/ironpython3 | Src/StdLib/Lib/multiprocessing/semaphore_tracker.py | 5 | 4863 | #
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import errno
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
from . import current_process
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
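# Hedged illustration (not part of the original module): register() and
# unregister() speak a line-based ASCII protocol over the pipe, one
# "<CMD>:<name>\n" record per call, which main() splits on ':' to maintain its
# cache. The semaphore name below is hypothetical.
def _example_protocol_records(name='/mp-demo-sem'):  # pragma: no cover
    # These byte strings are exactly what _send() would write for this name.
    return ('REGISTER:{0}\n'.format(name).encode('ascii'),
            'UNREGISTER:{0}\n'.format(name).encode('ascii'))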
| apache-2.0 |
benbox69/pyload | module/plugins/internal/Extractor.py | 6 | 4390 | # -*- coding: utf-8 -*-
import os
import re
from module.PyFile import PyFile
from module.plugins.internal.Plugin import Plugin
class ArchiveError(Exception):
pass
class CRCError(Exception):
pass
class PasswordError(Exception):
pass
class Extractor(Plugin):
__name__ = "Extractor"
__type__ = "extractor"
__version__ = "0.33"
__status__ = "testing"
__description__ = """Base extractor plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]"),
("Immenz" , "[email protected]" )]
EXTENSIONS = []
REPAIR = False
VERSION = None
@classmethod
def is_archive(cls, filename):
name = os.path.basename(filename).lower()
return any(name.endswith(ext) for ext in cls.EXTENSIONS)
@classmethod
def is_multipart(cls, filename):
return False
@classmethod
def find(cls):
"""
        Check if the system satisfies the dependencies
:return: boolean
"""
pass
@classmethod
def get_targets(cls, files_ids):
"""
        Filter suitable targets from a list of (filename, id, fout) tuples
        :param files_ids: List of (filename, id, fout) tuples
        :return: List of the (filename, id, fout) tuples that are suitable targets
"""
targets = []
processed = []
for fname, id, fout in files_ids:
if cls.is_archive(fname):
pname = re.sub(cls.re_multipart, "", fname) if cls.is_multipart(fname) else os.path.splitext(fname)[0]
if pname not in processed:
processed.append(pname)
targets.append((fname, id, fout))
return targets
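        # Illustrative note (added for clarity, not in the original source):
        # given e.g. [("a.part1.rar", 1, out), ("a.part2.rar", 2, out)] a
        # multipart-aware subclass collapses both entries into one target,
        # because both names map to the same ``pname`` after the
        # ``re_multipart`` substitution above.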
def __init__(self, plugin, filename, out,
fullpath=True,
overwrite=False,
excludefiles=[],
renice=0,
delete='No',
keepbroken=False,
fid=None):
"""
Initialize extractor for specific file
"""
self._init(plugin.pyload)
self.plugin = plugin
self.filename = filename
self.out = out
self.fullpath = fullpath
self.overwrite = overwrite
self.excludefiles = excludefiles
self.renice = renice
self.delete = delete
self.keepbroken = keepbroken
self.files = [] #: Store extracted files here
pyfile = self.pyload.files.getFile(fid) if fid else None
        self.notify_progress = (lambda x: pyfile.setProgress(x)) if pyfile else (lambda x: None)
self.init()
def init(self):
"""
Initialize additional data structures
"""
pass
def _log(self, level, plugintype, pluginname, messages):
return self.plugin._log(level,
plugintype,
self.plugin.__name__,
(self.__name__,) + messages)
def check(self):
"""
Quick Check by listing content of archive.
Raises error if password is needed, integrity is questionable or else.
:raises PasswordError
:raises CRCError
:raises ArchiveError
"""
raise NotImplementedError
def verify(self):
"""
        Test the archive with the extractor's built-in method.
        Raises an error if a password is needed, the integrity is questionable, or another problem occurs.
:raises PasswordError
:raises CRCError
:raises ArchiveError
"""
raise NotImplementedError
def repair(self):
return None
def extract(self, password=None):
"""
Extract the archive. Raise specific errors in case of failure.
        :param password: password to use
:raises PasswordError
:raises CRCError
:raises ArchiveError
:return:
"""
raise NotImplementedError
def get_delete_files(self):
"""
Return list of files to delete, do *not* delete them here.
:return: List with paths of files to delete
"""
return [self.filename]
def list(self, password=None):
"""
Populate self.files at some point while extracting
"""
return self.files
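# Minimal illustrative subclass sketch (not part of pyLoad): shows which hooks
# a concrete extractor is expected to override. The ".dummy" format and this
# class are hypothetical.
class DummyExtractor(Extractor):
    __name__ = "DummyExtractor"
    __type__ = "extractor"
    EXTENSIONS = [".dummy"]
    @classmethod
    def find(cls):
        return True  # a real plugin verifies that its external tool is present
    def check(self):
        pass  # a real plugin lists the archive and raises CRCError/PasswordError
    def verify(self):
        pass
    def extract(self, password=None):
        self.files = [self.filename]  # pretend extraction produced one file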
| gpl-3.0 |
xodus7/tensorflow | tensorflow/python/autograph/utils/type_check.py | 26 | 1170 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used in autograph-generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
def is_tensor(*args):
"""Check if any arguments are tensors.
Args:
*args: Python objects that may or may not be tensors.
Returns:
True if any *args are TensorFlow types, False if none are.
"""
return any([tensor_util.is_tensor(a) for a in args])
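# Illustrative usage sketch (not part of the original module); it assumes
# TensorFlow is imported as tf and that tf.constant is available:
#
#   import tensorflow as tf
#   is_tensor(1, 2.0)               # False: only plain Python values
#   is_tensor(1, tf.constant(2.0))  # True: at least one argument is a tensor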
| apache-2.0 |
yanheven/keystone | keystone/common/wsgi.py | 3 | 31025 | # Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import copy
import itertools
import urllib
from oslo_config import cfg
import oslo_i18n
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
import routes.middleware
import six
import webob.dec
import webob.exc
from keystone.common import dependency
from keystone.common import json_home
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
from keystone.models import token_model
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
def validate_token_bind(context, token_ref):
bind_mode = CONF.token.enforce_token_bind
if bind_mode == 'disabled':
return
if not isinstance(token_ref, token_model.KeystoneToken):
raise exception.UnexpectedError(_('token reference must be a '
'KeystoneToken type, got: %s') %
type(token_ref))
bind = token_ref.bind
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
# get the named mode if bind_mode is not one of the known
name = None if permissive or bind_mode == 'required' else bind_mode
if not bind:
if permissive:
# no bind provided and none required
return
else:
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
for bind_type, identifier in six.iteritems(bind):
if bind_type == 'kerberos':
if not (context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate'):
LOG.info(_LI("Kerberos credentials required and not present"))
raise exception.Unauthorized()
if not context['environment'].get('REMOTE_USER') == identifier:
LOG.info(_LI("Kerberos credentials do not match "
"those in bind"))
raise exception.Unauthorized()
LOG.info(_LI("Kerberos bind authentication successful"))
elif bind_mode == 'permissive':
LOG.debug(("Ignoring unknown bind for permissive mode: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
else:
LOG.info(_LI("Couldn't verify unknown bind: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
raise exception.Unauthorized()
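# Illustrative note (added for clarity, not in the original source): for the
# kerberos branch above, a token bind and the matching request environment
# would look like (values are hypothetical)
#
#     bind = {'kerberos': 'user@EXAMPLE.REALM'}
#     context['environment'] = {'AUTH_TYPE': 'Negotiate',
#                               'REMOTE_USER': 'user@EXAMPLE.REALM'}
#
# i.e. the container must have authenticated the request via Negotiate and
# REMOTE_USER must equal the identifier bound into the token.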
def best_match_language(req):
"""Determines the best available locale from the Accept-Language
HTTP header passed in the request.
"""
if not req.accept_language:
return None
return req.accept_language.best_match(
oslo_i18n.get_available_languages('keystone'))
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = keystone.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import keystone.fancy_api
keystone.fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify()
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
class Application(BaseApplication):
@webob.dec.wsgify()
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
# allow middleware up the stack to provide context, params and headers.
context = req.environ.get(CONTEXT_ENV, {})
context['query_string'] = dict(six.iteritems(req.params))
context['headers'] = dict(six.iteritems(req.headers))
context['path'] = req.environ['PATH_INFO']
scheme = (None if not CONF.secure_proxy_ssl_header
else req.environ.get(CONF.secure_proxy_ssl_header))
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
# the webob.Request instance is modified in order to use this
# scheme instead of the one defined by API, the call to
# webob.Request.relative_url() will return a URL with the correct
# scheme.
req.environ['wsgi.url_scheme'] = scheme
context['host_url'] = req.host_url
params = req.environ.get(PARAMS_ENV, {})
# authentication and authorization attributes are set as environment
# values by the container and processed by the pipeline. the complete
        # set is not yet known.
context['environment'] = req.environ
context['accept_header'] = req.accept
req.environ = None
params.update(arg_dict)
context.setdefault('is_admin', False)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(morganfainberg): use the request method to normalize the
# response code between GET and HEAD requests. The HTTP status should
# be the same.
req_method = req.environ['REQUEST_METHOD'].upper()
LOG.info('%(req_method)s %(path)s?%(params)s', {
'req_method': req_method,
'path': context['path'],
'params': urllib.urlencode(req.params)})
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning(
_LW("Authorization failed. %(exception)s from "
"%(remote_addr)s"),
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
return render_exception(e, context=context,
user_locale=best_match_language(req))
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, context=context,
user_locale=best_match_language(req))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
context=context,
user_locale=best_match_language(req))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
context=context,
user_locale=best_match_language(req))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, six.string_types):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code,
method=req_method)
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
controller = importutils.import_class('keystone.common.controller')
code = None
if isinstance(self, controller.V3Controller) and req_method == 'POST':
code = (201, 'Created')
return code
def _normalize_arg(self, arg):
return arg.replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
def assert_admin(self, context):
if not context['is_admin']:
try:
user_token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except exception.TokenNotFound as e:
raise exception.Unauthorized(e)
validate_token_bind(context, user_token_ref)
creds = copy.deepcopy(user_token_ref.metadata)
try:
creds['user_id'] = user_token_ref.user_id
except exception.UnexpectedError:
LOG.debug('Invalid user')
raise exception.Unauthorized()
if user_token_ref.project_scoped:
creds['tenant_id'] = user_token_ref.project_id
else:
LOG.debug('Invalid tenant')
raise exception.Unauthorized()
creds['roles'] = user_token_ref.role_names
# Accept either is_admin or the admin role
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
"""Returns true if the attribute in the given ref (which is a
dict) is empty or None.
"""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
"""Ensures the reference contains the specified attribute.
Raise a ValidationError if the given attribute is not present
"""
if self._attribute_is_empty(ref, attribute):
msg = _('%s field is required and cannot be empty') % attribute
raise exception.ValidationError(message=msg)
def _require_attributes(self, ref, attrs):
"""Ensures the reference contains the specified attributes.
Raise a ValidationError if any of the given attributes is not present
"""
missing_attrs = [attribute for attribute in attrs
if self._attribute_is_empty(ref, attribute)]
if missing_attrs:
msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
raise exception.ValidationError(message=msg)
def _get_trust_id_for_request(self, context):
"""Get the trust_id for a call.
Retrieve the trust_id from the token
Returns None if token is not trust scoped
"""
if ('token_id' not in context or
context.get('token_id') == CONF.admin_token):
LOG.debug(('will not lookup trust as the request auth token is '
'either absent or it is the system admin token'))
return None
try:
token_data = self.token_provider_api.validate_token(
context['token_id'])
except exception.TokenNotFound:
LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
raise exception.Unauthorized()
token_ref = token_model.KeystoneToken(token_id=context['token_id'],
token_data=token_data)
return token_ref.trust_id
@classmethod
def base_url(cls, context, endpoint_type):
url = CONF['%s_endpoint' % endpoint_type]
if url:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
else:
# NOTE(jamielennox): if url is not set via the config file we
# should set it relative to the url that the user used to get here
# so as not to mess with version discovery. This is not perfect.
# host_url omits the path prefix, but there isn't another good
# solution that will work for all urls.
url = context['host_url']
return url.rstrip('/')
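# Illustrative sketch (hypothetical option value, not part of the original
# module): with public_endpoint = 'http://%(public_bind_host)s:%(public_port)d/'
# set in the config, base_url(context, 'public') interpolates the combined
# CONF / CONF.eventlet_server values and strips the trailing slash, e.g.
#
#   Application.base_url(context, 'public')   # -> 'http://0.0.0.0:5000'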
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
def __init__(self, application):
super(Middleware, self).__init__()
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify()
def __call__(self, request):
try:
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, request=request,
user_locale=best_match_language(request))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
request=request,
user_locale=best_match_language(request))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
request=request,
user_locale=best_match_language(request))
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify()
def __call__(self, req):
        if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key,
strutils.mask_password(value))
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug('%s', strutils.mask_password(line))
LOG.debug('')
resp = req.get_response(self.application)
        if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in six.iteritems(resp.headers):
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify()
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify()
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
msg = _('The resource could not be found.')
return render_exception(exception.NotFound(msg),
request=req,
user_locale=best_match_language(req))
app = match['controller']
return app
class ComposingRouter(Router):
def __init__(self, mapper=None, routers=None):
if mapper is None:
mapper = routes.Mapper()
if routers is None:
routers = []
for router in routers:
router.add_routes(mapper)
super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
"""Router that supports use by ComposingRouter."""
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.add_routes(mapper)
super(ComposableRouter, self).__init__(mapper)
def add_routes(self, mapper):
"""Add routes to given mapper."""
pass
class ExtensionRouter(Router):
"""A router that allows extensions to supplement or overwrite routes.
Expects to be subclassed.
"""
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
mapper.connect('{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
pass
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
class RoutersBase(object):
"""Base class for Routers."""
def __init__(self):
self.v3_resources = []
def append_v3_routers(self, mapper, routers):
"""Append v3 routers.
        Subclasses should override this method to map their routes.
Use self._add_resource() to map routes for a resource.
"""
def _add_resource(self, mapper, controller, path, rel,
get_action=None, head_action=None, get_head_action=None,
put_action=None, post_action=None, patch_action=None,
delete_action=None, get_post_action=None,
path_vars=None, status=json_home.Status.STABLE):
if get_head_action:
getattr(controller, get_head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_head_action,
conditions=dict(method=['GET', 'HEAD']))
if get_action:
getattr(controller, get_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_action,
conditions=dict(method=['GET']))
if head_action:
getattr(controller, head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=head_action,
conditions=dict(method=['HEAD']))
if put_action:
getattr(controller, put_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=put_action,
conditions=dict(method=['PUT']))
if post_action:
getattr(controller, post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=post_action,
conditions=dict(method=['POST']))
if patch_action:
getattr(controller, patch_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=patch_action,
conditions=dict(method=['PATCH']))
if delete_action:
getattr(controller, delete_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=delete_action,
conditions=dict(method=['DELETE']))
if get_post_action:
getattr(controller, get_post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_post_action,
conditions=dict(method=['GET', 'POST']))
resource_data = dict()
if path_vars:
resource_data['href-template'] = path
resource_data['href-vars'] = path_vars
else:
resource_data['href'] = path
json_home.Status.update_resource_data(resource_data, status)
self.v3_resources.append((rel, resource_data))
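# Illustrative sketch (hypothetical controller and relation, not part of the
# original module): a RoutersBase subclass typically wires one resource per
# path inside append_v3_routers, e.g.
#
#   def append_v3_routers(self, mapper, routers):
#       self._add_resource(
#           mapper, widget_controller, path='/widgets/{widget_id}',
#           get_action='get_widget', delete_action='delete_widget',
#           rel='widget', path_vars={'widget_id': 'widget-id-doc-url'})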
class V3ExtensionRouter(ExtensionRouter, RoutersBase):
"""Base class for V3 extension router."""
def __init__(self, application, mapper=None):
self.v3_resources = list()
super(V3ExtensionRouter, self).__init__(application, mapper)
def _update_version_response(self, response_data):
response_data['resources'].update(self.v3_resources)
@webob.dec.wsgify()
def __call__(self, request):
if request.path_info != '/':
# Not a request for version info so forward to super.
return super(V3ExtensionRouter, self).__call__(request)
response = request.get_response(self.application)
if response.status_code != 200:
# The request failed, so don't update the response.
return response
if response.headers['Content-Type'] != 'application/json-home':
# Not a request for JSON Home document, so don't update the
# response.
return response
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
response.body = jsonutils.dumps(response_data,
cls=utils.SmarterEncoder)
return response
def render_response(body=None, status=None, headers=None, method=None):
"""Forms a WSGI response."""
if headers is None:
headers = []
else:
headers = list(headers)
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
body = ''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
if content_types:
content_type = content_types[0]
else:
content_type = None
JSON_ENCODE_CONTENT_TYPES = ('application/json',
'application/json-home',)
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
if method == 'HEAD':
# NOTE(morganfainberg): HEAD requests should return the same status
# as a GET request and same headers (including content-type and
# content-length). The webob.Response object automatically changes
# content-length (and other headers) if the body is set to b''. Capture
# all headers and reset them on the response object after clearing the
# body. The body can only be set to a binary-type (not TextType or
# NoneType), so b'' is used here and should be compatible with
# both py2x and py3x.
stored_headers = resp.headers.copy()
resp.body = b''
for header, value in six.iteritems(stored_headers):
resp.headers[header] = value
return resp
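# Illustrative sketch (not part of the original module):
#
#   render_response(body={'hello': 'world'})
#   # -> 200 OK, Content-Type: application/json, Vary: X-Auth-Token,
#   #    body '{"hello": "world"}'
#   render_response()
#   # -> 204 No Content with an empty body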
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
# translate() didn't do anything because it wasn't a Message,
# convert to a string.
message = six.text_type(message)
body = {'error': {
'code': error.code,
'title': error.title,
'message': message,
}}
headers = []
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
url = CONF.public_endpoint
if not url:
if request:
context = {'host_url': request.host_url}
if context:
url = Application.base_url(context, 'public')
else:
url = 'http://localhost:%d' % CONF.eventlet_server.public_port
else:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),
body=body,
headers=headers)
| apache-2.0 |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_01_09_2015_01.py | 1 | 25014 |
# coding: utf-8
# In[1]:
# this part imports libs and load data from csv file
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb, PIL
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[2]:
# set settings for this script
settings = {}
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 1
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 1
settings['number_iterations'] = 10
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 50000#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 50000 #300
settings['hidden_layers_sizes'] = [200, 200, 200, 200, 200]
settings['corruption_levels'] = [0.5, 0.5, 0.5, 0.5, 0.5 ]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000]
settings['test_set_from_test'] = True
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
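# Illustrative sketch (hypothetical numbers): every entry collected into
# analysis_scr below has the shape
#   (subset_no, number_of_training, method_name, isTest) + metric values
# so a row written by saveAsCsv looks roughly like
#   1, 10000, 'SVM_RBF', True, 0.93, 0.91, ...
# with the metric columns coming from performance_score(...).keys().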
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
print x_train.shape, y_train.shape, x_validation.shape, y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + '_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings)
# In[48]:
# save objects
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'wb') as handle:
pickle.dump(sda, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_original.pickle', 'wb') as handle:
pickle.dump(a_MAE_original, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_A.pickle', 'wb') as handle:
pickle.dump(a_MAE_A, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_B.pickle', 'wb') as handle:
pickle.dump(a_MAE_B, handle)
# detach and close this script's log handlers so the log file is flushed
for i in list(logger.handlers):
    logger.removeHandler(i)
    i.flush()
    i.close()
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 |
maizy/ambient7 | ambient7-arduino/serial2influxdb/serial2influxdb.py | 1 | 4275 | #!/usr/bin/env python3
# coding: utf-8
import argparse
import configparser
import logging
import time
import re
import datetime
import sys
import serial
import influxdb
SERIAL_RETRY_DELAY = 5.0
logger = logging.getLogger('s2i')
def parse_args_and_config(args):
parser = argparse.ArgumentParser(description='ambient7 - serial2influxdb')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('config', metavar='config.cfg', type=argparse.FileType('r', encoding='UTF-8'),
default='./config.cfg', help='path to config', nargs='?')
cli_args = parser.parse_args(args)
config = configparser.ConfigParser()
config.read_file(cli_args.config)
return cli_args, config
def open_serial(config):
while True:
try:
return serial.Serial(
port=config['serial']['tty'],
baudrate=int(config['serial']['baud']),
timeout=1,
exclusive=True
)
except serial.SerialException as e:
logger.warning('unable to open pyserial connection: {}'.format(e))
logger.info('retry after {} second'.format(SERIAL_RETRY_DELAY))
time.sleep(SERIAL_RETRY_DELAY)
def resilient_line_generator(config):
ser = None
while True:
if ser is None:
ser = open_serial(config)
try:
byte_str = ser.readline()
except serial.SerialException as e:
try:
ser.close()
except Exception:
pass
ser = None
continue
if byte_str not in (b'', b'\r\n'):
try:
yield byte_str.decode('utf-8').rstrip('\r\n')
except UnicodeDecodeError:
pass
def collect_data(key, value, tags=None):
data = {
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'measurement': key,
}
if tags:
data['tags'] = tags
if key == 'uptime':
data['fields'] = {'value': int(value.rstrip('s'))}
elif key == 'humidity':
data['fields'] = {'value': float(value.strip('%'))}
elif key == 'co2':
if value.endswith('PPM'):
value = value[:-3]
data['fields'] = {'value': int(value)}
elif key == 'temperature':
data['fields'] = {'value': float(value.strip('C'))}
else:
return None
return [data]
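# Illustrative sketch (hypothetical values): collect_data('co2', '612PPM',
# tags={'room': 'office'}) yields a single point such as
#   [{'time': '2017-01-01T00:00:00Z', 'measurement': 'co2',
#     'tags': {'room': 'office'}, 'fields': {'value': 612}}]
# while an unrecognised key returns None and is skipped by the caller.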
def build_influxdb_client(config):
opts = {
'host': config['influxdb']['server'],
'port': int(config['influxdb']['port']),
'database': config['influxdb']['database']
}
if 'username' in config['influxdb']:
opts['username'] = config['influxdb']['username']
opts['password'] = config['influxdb']['password']
return influxdb.InfluxDBClient(**opts)
def main(args):
cli_args, config = parse_args_and_config(args)
influxdb_client = build_influxdb_client(config)
logging.basicConfig(
level=logging.DEBUG if cli_args.verbose else logging.INFO,
stream=sys.stderr,
format='%(asctime)s %(levelname).1s %(message)s'
)
tags = {}
if 'metrics' in config and 'tags' in config['metrics']:
for pair in config['metrics']['tags'].split(','):
tag_key, _, tag_value = pair.partition('=')
tags[tag_key] = tag_value
try:
for line in resilient_line_generator(config):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("receive line: %r", line)
data_match = re.match(r'DATA: (?P<key>[a-z0-9_]+)=(?P<value>.+)', line, re.IGNORECASE)
if data_match is not None:
key = data_match.group('key')
raw_value = data_match.group('value')
                logger.info('%s=%s', key, raw_value)
data = collect_data(key, raw_value, tags)
if data is not None:
try:
influxdb_client.write_points(data)
except Exception as e:
logger.warning("Unable to send data to influxdb: %s", e)
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| apache-2.0 |
mannygit/zerorpc-python | zerorpc/events.py | 51 | 9209 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .context import Context
class Sender(object):
def __init__(self, socket):
self._socket = socket
self._send_queue = gevent.queue.Channel()
self._send_task = gevent.spawn(self._sender)
def __del__(self):
self.close()
def close(self):
if self._send_task:
self._send_task.kill()
def _sender(self):
running = True
for parts in self._send_queue:
for i in xrange(len(parts) - 1):
try:
self._socket.send(parts[i], flags=zmq.SNDMORE)
except gevent.GreenletExit:
if i == 0:
return
running = False
self._socket.send(parts[i], flags=zmq.SNDMORE)
self._socket.send(parts[-1])
if not running:
return
def __call__(self, parts):
self._send_queue.put(parts)
class Receiver(object):
def __init__(self, socket):
self._socket = socket
self._recv_queue = gevent.queue.Channel()
self._recv_task = gevent.spawn(self._recver)
def __del__(self):
self.close()
def close(self):
if self._recv_task:
self._recv_task.kill()
def _recver(self):
running = True
while True:
parts = []
while True:
try:
part = self._socket.recv()
except gevent.GreenletExit:
running = False
if len(parts) == 0:
return
part = self._socket.recv()
parts.append(part)
if not self._socket.getsockopt(zmq.RCVMORE):
break
if not running:
break
self._recv_queue.put(parts)
def __call__(self):
return self._recv_queue.get()
class Event(object):
__slots__ = ['_name', '_args', '_header']
def __init__(self, name, args, context, header=None):
self._name = name
self._args = args
if header is None:
self._header = {
'message_id': context.new_msgid(),
'v': 3
}
else:
self._header = header
@property
def header(self):
return self._header
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def args(self):
return self._args
def pack(self):
return msgpack.Packer().pack((self._header, self._name, self._args))
@staticmethod
def unpack(blob):
unpacker = msgpack.Unpacker()
unpacker.feed(blob)
unpacked_msg = unpacker.unpack()
try:
(header, name, args) = unpacked_msg
except Exception as e:
raise Exception('invalid msg format "{0}": {1}'.format(
unpacked_msg, e))
# Backward compatibility
if not isinstance(header, dict):
header = {}
return Event(name, args, None, header)
def __str__(self, ignore_args=False):
if ignore_args:
args = '[...]'
else:
args = self._args
try:
args = '<<{0}>>'.format(str(self.unpack(self._args)))
except:
pass
return '{0} {1} {2}'.format(self._name, self._header,
args)
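# Illustrative sketch (not part of the original module): an Event survives a
# msgpack round trip, e.g.
#
#   e = Event('add', (1, 2), context=Context.get_instance())
#   e2 = Event.unpack(e.pack())
#   assert e2.name == 'add' and list(e2.args) == [1, 2]
#
# (msgpack decodes the args tuple back as a list, hence list() above).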
class Events(object):
def __init__(self, zmq_socket_type, context=None):
self._zmq_socket_type = zmq_socket_type
self._context = context or Context.get_instance()
self._socket = zmq.Socket(self._context, zmq_socket_type)
self._send = self._socket.send_multipart
self._recv = self._socket.recv_multipart
if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER):
self._send = Sender(self._socket)
if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER):
self._recv = Receiver(self._socket)
@property
def recv_is_available(self):
return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER)
def __del__(self):
try:
if not self._socket.closed:
self.close()
except AttributeError:
pass
def close(self):
try:
self._send.close()
except AttributeError:
pass
try:
self._recv.close()
except AttributeError:
pass
self._socket.close()
def _resolve_endpoint(self, endpoint, resolve=True):
if resolve:
endpoint = self._context.hook_resolve_endpoint(endpoint)
if isinstance(endpoint, (tuple, list)):
r = []
for sub_endpoint in endpoint:
r.extend(self._resolve_endpoint(sub_endpoint, resolve))
return r
return [endpoint]
def connect(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.connect(endpoint_))
return r
def bind(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.bind(endpoint_))
return r
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, context=self._context)
for k, v in xheader.items():
if k == 'zmqid':
continue
event.header[k] = v
return event
def emit_event(self, event, identity=None):
if identity is not None:
parts = list(identity)
parts.extend(['', event.pack()])
elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER):
parts = ('', event.pack())
else:
parts = (event.pack(),)
self._send(parts)
def emit(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = self.create_event(name, args, xheader)
identity = xheader.get('zmqid', None)
return self.emit_event(event, identity)
def recv(self):
parts = self._recv()
if len(parts) == 1:
identity = None
blob = parts[0]
else:
identity = parts[0:-2]
blob = parts[-1]
event = Event.unpack(blob)
if identity is not None:
event.header['zmqid'] = identity
return event
def setsockopt(self, *args):
return self._socket.setsockopt(*args)
@property
def context(self):
return self._context
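# Illustrative sketch (hypothetical endpoint, not part of the original
# module): a minimal PUSH/PULL pair built on Events:
#
#   pull = Events(zmq.PULL)
#   pull.bind('tcp://127.0.0.1:4242')
#   push = Events(zmq.PUSH)
#   push.connect('tcp://127.0.0.1:4242')
#   push.emit('ping', ('hello',))
#   event = pull.recv()     # Event named 'ping' with args ['hello']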
class WrappedEvents(object):
def __init__(self, channel):
self._channel = channel
def close(self):
pass
@property
def recv_is_available(self):
return self._channel.recv_is_available
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, self._channel.context)
event.header.update(xheader)
return event
def emit_event(self, event, identity=None):
event_payload = (event.header, event.name, event.args)
wrapper_event = self._channel.create_event('w', event_payload)
self._channel.emit_event(wrapper_event)
def emit(self, name, args, xheader=None):
wrapper_event = self.create_event(name, args, xheader)
self.emit_event(wrapper_event)
def recv(self, timeout=None):
wrapper_event = self._channel.recv()
(header, name, args) = wrapper_event.args
return Event(name, args, None, header)
@property
def context(self):
return self._channel.context
| mit |
yaroslavvb/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/estimator_utils.py | 75 | 7274 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions relating DataFrames to Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers import feature_column
from tensorflow.contrib.learn.python.learn.dataframe import series as ss
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import parsing_ops
def _to_feature_spec(tensor, default_value=None):
if isinstance(tensor, sparse_tensor.SparseTensor):
return parsing_ops.VarLenFeature(dtype=tensor.dtype)
else:
return parsing_ops.FixedLenFeature(shape=tensor.get_shape(),
dtype=tensor.dtype,
default_value=default_value)
def _infer_feature_specs(dataframe, keys_with_defaults):
with ops.Graph().as_default():
tensors = dataframe.build()
feature_specs = {
name: _to_feature_spec(tensor, keys_with_defaults.get(name))
for name, tensor in tensors.items()}
return feature_specs
def _build_alternate_universe(
dataframe, base_input_keys_with_defaults, feature_keys):
"""Create an alternate universe assuming that the base series are defined.
The resulting graph will be used with an `input_fn` that provides exactly
those features.
Args:
dataframe: the underlying `DataFrame`
base_input_keys_with_defaults: a `dict` from the names of columns to
considered base features to their default values.
feature_keys: the names of columns to be used as features (including base
features and derived features).
Returns:
A `dict` mapping names to rebuilt `Series`.
"""
feature_specs = _infer_feature_specs(dataframe, base_input_keys_with_defaults)
alternate_universe_map = {
dataframe[name]: ss.PredefinedSeries(name, feature_specs[name])
for name in base_input_keys_with_defaults.keys()
}
def _in_alternate_universe(orig_series):
# pylint: disable=protected-access
# Map Series in the original DataFrame to series rebuilt assuming base_keys.
try:
return alternate_universe_map[orig_series]
except KeyError:
rebuilt_inputs = []
for i in orig_series._input_series:
rebuilt_inputs.append(_in_alternate_universe(i))
rebuilt_series = ss.TransformedSeries(rebuilt_inputs,
orig_series._transform,
orig_series._output_name)
alternate_universe_map[orig_series] = rebuilt_series
return rebuilt_series
orig_feature_series_dict = {fk: dataframe[fk] for fk in feature_keys}
new_feature_series_dict = ({name: _in_alternate_universe(x)
for name, x in orig_feature_series_dict.items()})
return new_feature_series_dict, feature_specs
def to_feature_columns_and_input_fn(dataframe,
base_input_keys_with_defaults,
feature_keys,
label_keys=None,
**kwargs):
"""Build a list of FeatureColumns and an input_fn for use with Estimator.
Args:
dataframe: the underlying dataframe
base_input_keys_with_defaults: a dict from the names of columns to be
considered base features to their default values. These columns will be
fed via input_fn.
feature_keys: the names of columns from which to generate FeatureColumns.
These may include base features and/or derived features.
label_keys: the names of columns to be used as labels. None is
acceptable for unsupervised learning.
**kwargs: Additional keyword arguments, unused here.
Returns:
A tuple of two elements:
* A list of `FeatureColumn`s to be used when constructing an Estimator
* An input_fn, i.e. a function that returns a pair of dicts
(features, labels), each mapping string names to Tensors.
the feature dict provides mappings for all the base columns required
by the FeatureColumns.
Raises:
ValueError: when the feature and label key sets are non-disjoint, or the
base_input and label sets are non-disjoint.
"""
if feature_keys is None or not feature_keys:
raise ValueError("feature_keys must be specified.")
if label_keys is None:
label_keys = []
base_input_keys = base_input_keys_with_defaults.keys()
in_two = (set(feature_keys) & set(label_keys)) or (set(base_input_keys) &
set(label_keys))
if in_two:
raise ValueError("Columns cannot be used for both features and labels: %s"
% ", ".join(in_two))
# Obtain the feature series in the alternate universe
new_feature_series_dict, feature_specs = _build_alternate_universe(
dataframe, base_input_keys_with_defaults, feature_keys)
# TODO(soergel): Allow non-real, non-dense DataFrameColumns
for key in new_feature_series_dict.keys():
spec = feature_specs[key]
if not (
isinstance(spec, parsing_ops.FixedLenFeature)
and (spec.dtype.is_integer or spec.dtype.is_floating)):
raise ValueError("For now, only real dense columns can be passed from "
"DataFrame to Estimator. %s is %s of %s" % (
(key, type(spec).__name__, spec.dtype)))
# Make FeatureColumns from these
feature_columns = [feature_column.DataFrameColumn(name, s)
for name, s in new_feature_series_dict.items()]
# Make a new DataFrame with only the Series needed for input_fn.
# This is important to avoid starting queue feeders that won't be used.
limited_dataframe = dataframe.select_columns(
list(base_input_keys) + list(label_keys))
# Build an input_fn suitable for use with Estimator.
def input_fn():
"""An input_fn() for feeding the given set of DataFrameColumns."""
# It's important to build all the tensors together in one DataFrame.
# If we did df.select() for both key sets and then build those, the two
# resulting DataFrames would be shuffled independently.
tensors = limited_dataframe.build(**kwargs)
base_input_features = {key: tensors[key] for key in base_input_keys}
labels = {key: tensors[key] for key in label_keys}
# TODO(soergel): Remove this special case when b/30367437 is fixed.
if len(labels) == 1:
labels = list(labels.values())[0]
return base_input_features, labels
return feature_columns, input_fn
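def _example_to_feature_columns_and_input_fn(dataframe):
  """Hedged usage sketch added for illustration; not part of the original module.

  Assumes `dataframe` has columns named "age", "income" and "label"; the column
  names and the downstream Estimator wiring are placeholders, not API guarantees.
  """
  feature_columns, input_fn = to_feature_columns_and_input_fn(
      dataframe,
      base_input_keys_with_defaults={"age": 0, "income": 0.0},
      feature_keys=["age", "income"],
      label_keys=["label"])
  # feature_columns would typically be passed to an Estimator constructor and
  # input_fn to its fit()/evaluate() calls.
  return feature_columns, input_fn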
| apache-2.0 |
Arable/old-www-do-not-use | lib/python2.7/site-packages/requests/packages/urllib3/_collections.py | 310 | 3111 | # urllib3/_collections.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
        ``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return self._container.keys()
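# Hedged usage sketch added for illustration; not part of the original urllib3
# module. Shows eviction order and the dispose callback with made-up values.
def _example_recently_used_container():
    evicted = []
    cache = RecentlyUsedContainer(maxsize=3, dispose_func=evicted.append)
    for i in range(5):
        cache[i] = 'value-%d' % i
    # Keys 0 and 1 were the least recently used, so their values were disposed.
    assert len(cache) == 3
    assert evicted == ['value-0', 'value-1']
    return cache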
| mit |
psychopy/versions | psychopy/app/utils.py | 1 | 17380 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""utility classes for the Builder
"""
from __future__ import absolute_import, division, print_function
import os
from builtins import object
from wx.lib.agw.aui.aui_constants import *
from wx.lib.agw.aui.aui_utilities import IndentPressedBitmap, ChopText, TakeScreenShot
import sys
import wx
import wx.lib.agw.aui as aui
from wx.lib import platebtn
import psychopy
from psychopy import logging
from . import pavlovia_ui
from . import icons
from .themes import ThemeMixin
from psychopy.localization import _translate
class FileDropTarget(wx.FileDropTarget):
"""On Mac simply setting a handler for the EVT_DROP_FILES isn't enough.
Need this too.
"""
def __init__(self, targetFrame):
wx.FileDropTarget.__init__(self)
self.target = targetFrame
def OnDropFiles(self, x, y, filenames):
logging.debug(
'PsychoPyBuilder: received dropped files: %s' % filenames)
for fname in filenames:
if fname.endswith('.psyexp') or fname.lower().endswith('.py'):
self.target.fileOpen(filename=fname)
else:
logging.warning(
'dropped file ignored: did not end in .psyexp or .py')
return True
class WindowFrozen(object):
"""
Equivalent to wxWindowUpdateLocker.
Usage::
with WindowFrozen(wxControl):
update multiple things
# will automatically thaw here
"""
def __init__(self, ctrl):
self.ctrl = ctrl
def __enter__(self): # started the with... statement
# Freeze should not be called if platform is win32.
if sys.platform == 'win32':
return self.ctrl
# check it hasn't been deleted
#
# Don't use StrictVersion() here, as `wx` doesn't follow the required
# numbering scheme.
if self.ctrl is not None and wx.__version__[:3] <= '2.8':
self.ctrl.Freeze()
return self.ctrl
def __exit__(self, exc_type, exc_val, exc_tb):
# Thaw should not be called if platform is win32.
if sys.platform == 'win32':
return
# check it hasn't been deleted
if self.ctrl is not None and self.ctrl.IsFrozen():
self.ctrl.Thaw()
def getSystemFonts(encoding='system', fixedWidthOnly=False):
"""Get a list of installed system fonts.
Parameters
----------
encoding : str
Get fonts with matching encodings.
fixedWidthOnly : bool
        Return only fixed width fonts.
Returns
-------
list
List of font facenames.
"""
fontEnum = wx.FontEnumerator()
encoding = "FONTENCODING_" + encoding.upper()
if hasattr(wx, encoding):
encoding = getattr(wx, encoding)
return fontEnum.GetFacenames(encoding, fixedWidthOnly=fixedWidthOnly)
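def _exampleGetSystemFonts():
    """Hedged usage sketch (editor addition, not part of PsychoPy).

    Returns the locally installed fixed-width facenames; the exact list
    depends on the fonts installed on the machine running the app.
    """
    return sorted(getSystemFonts(fixedWidthOnly=True))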
class PsychopyToolbar(wx.ToolBar, ThemeMixin):
"""Toolbar for the Builder/Coder Frame"""
def __init__(self, frame):
wx.ToolBar.__init__(self, frame)
self.frame = frame
self.app = self.frame.app
self._needMakeTools = True
# Configure toolbar appearance
self.SetWindowStyle(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_NODIVIDER)
#self.SetBackgroundColour(ThemeMixin.appColors['frame_bg'])
        # Set icon size (16 for win/linux small mode, 32 for everything else)
self.iconSize = 32 # mac: 16 either doesn't work, or looks bad
self.SetToolBitmapSize((self.iconSize, self.iconSize))
# OS-dependent tool-tips
ctrlKey = 'Ctrl+'
if sys.platform == 'darwin':
ctrlKey = 'Cmd+'
# keys are the keyboard keys, not the keys of the dict
self.keys = {k: self.frame.app.keys[k].replace('Ctrl+', ctrlKey)
for k in self.frame.app.keys}
self.keys['none'] = ''
# self.makeTools() # will be done when theme is applied
# Finished setup. Make it happen
def makeTools(self):
frame = self.frame
# Create tools
cl = frame.__class__.__name__
pavButtons = pavlovia_ui.toolbar.PavloviaButtons(
frame, toolbar=self, tbSize=self.iconSize)
if frame.__class__.__name__ == 'BuilderFrame':
self.addPsychopyTool(
name='filenew',
label=_translate('New'),
shortcut='new',
tooltip=_translate("Create new experiment file"),
func=self.frame.app.newBuilderFrame) # New
self.addPsychopyTool(
name='fileopen',
label=_translate('Open'),
shortcut='open',
tooltip=_translate("Open an existing experiment file"),
func=self.frame.fileOpen) # Open
self.frame.bldrBtnSave = self.addPsychopyTool(
name='filesave',
label=_translate('Save'),
shortcut='save',
tooltip=_translate("Save current experiment file"),
func=self.frame.fileSave) # Save
self.addPsychopyTool(
name='filesaveas',
label=_translate('Save As...'),
shortcut='saveAs',
tooltip=_translate("Save current experiment file as..."),
func=self.frame.fileSaveAs) # SaveAs
self.frame.bldrBtnUndo = self.addPsychopyTool(
name='undo',
label=_translate('Undo'),
shortcut='undo',
tooltip=_translate("Undo last action"),
func=self.frame.undo) # Undo
self.frame.bldrBtnRedo = self.addPsychopyTool(
name='redo',
label=_translate('Redo'),
shortcut='redo',
tooltip=_translate("Redo last action"),
func=self.frame.redo) # Redo
            self.AddSeparator() # Separator
self.addPsychopyTool(
name='monitors',
label=_translate('Monitor Center'),
shortcut='none',
tooltip=_translate("Monitor settings and calibration"),
func=self.frame.app.openMonitorCenter) # Monitor Center
self.addPsychopyTool(
name='cogwindow',
label=_translate('Experiment Settings'),
shortcut='none',
tooltip=_translate("Edit experiment settings"),
func=self.frame.setExperimentSettings) # Settings
self.AddSeparator()
self.addPsychopyTool(
name='compile',
label=_translate('Compile Script'),
shortcut='compileScript',
tooltip=_translate("Compile to script"),
func=self.frame.compileScript) # Compile
self.frame.bldrBtnRunner = self.addPsychopyTool(
name='runner',
label=_translate('Runner'),
shortcut='runnerScript',
tooltip=_translate("Send experiment to Runner"),
func=self.frame.runFile) # Run
self.frame.bldrBtnRun = self.addPsychopyTool(
name='run',
label=_translate('Run'),
shortcut='runScript',
tooltip=_translate("Run experiment"),
func=self.frame.runFile) # Run
            self.AddSeparator() # Separator
pavButtons.addPavloviaTools()
elif frame.__class__.__name__ == 'CoderFrame':
self.addPsychopyTool('filenew', _translate('New'), 'new',
_translate("Create new experiment file"),
self.frame.fileNew) # New
self.addPsychopyTool('fileopen', _translate('Open'), 'open',
_translate("Open an existing experiment file"),
self.frame.fileOpen) # Open
self.frame.cdrBtnSave = \
self.addPsychopyTool('filesave', _translate('Save'), 'save',
_translate("Save current experiment file"),
self.frame.fileSave) # Save
self.addPsychopyTool('filesaveas', _translate('Save As...'), 'saveAs',
_translate("Save current experiment file as..."),
self.frame.fileSaveAs) # SaveAs
self.frame.cdrBtnUndo = \
self.addPsychopyTool('undo', _translate('Undo'), 'undo',
_translate("Undo last action"),
self.frame.undo) # Undo
self.frame.cdrBtnRedo = \
self.addPsychopyTool('redo', _translate('Redo'), 'redo',
_translate("Redo last action"),
self.frame.redo) # Redo
            self.AddSeparator() # Separator
self.addPsychopyTool('monitors', _translate('Monitor Center'), 'none',
_translate("Monitor settings and calibration"),
self.frame.app.openMonitorCenter)
self.addPsychopyTool('color', _translate('Color Picker'), 'none',
_translate("Color Picker -> clipboard"),
self.frame.app.colorPicker)
self.AddSeparator()
self.frame.cdrBtnRunner = self.addPsychopyTool(
'runner', _translate('Runner'), 'runnerScript',
_translate("Send experiment to Runner"),
self.frame.runFile)
self.frame.cdrBtnRun = self.addPsychopyTool(
'run', _translate('Run'), 'runScript',
_translate("Run experiment"),
self.frame.runFile)
self.AddSeparator()
pavButtons.addPavloviaTools(
buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser'])
frame.btnHandles.update(pavButtons.btnHandles)
self.Realize()
def addPsychopyTool(self, name, label, shortcut, tooltip, func,
emblem=None):
if not name.endswith('.png'):
name += '.png'
item = self.app.iconCache.makeBitmapButton(parent=self, filename=name,
name=label,
label=("%s [%s]" % (
label,
self.keys[shortcut])),
emblem=emblem, toolbar=self,
tip=tooltip,
size=self.iconSize)
# Bind function
self.Bind(wx.EVT_TOOL, func, item)
return item
class PsychopyPlateBtn(platebtn.PlateButton, ThemeMixin):
def __init__(self, parent, id=wx.ID_ANY, label='', bmp=None,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=1, name=wx.ButtonNameStr):
platebtn.PlateButton.__init__(self, parent, id, label, bmp, pos, size, style, name)
self.parent = parent
self.__InitColors()
self._applyAppTheme()
def _applyAppTheme(self):
cs = ThemeMixin.appColors
self.__InitColors()
self.SetBackgroundColour(wx.Colour(self.parent.GetBackgroundColour()))
self.SetPressColor(cs['txtbutton_bg_hover'])
self.SetLabelColor(cs['text'],
cs['txtbutton_fg_hover'])
    def __InitColors(self):
        """Initialize the default colors"""
        cs = ThemeMixin.appColors
        colors = dict(default=True,
                      hlight=cs['txtbutton_bg_hover'],
                      press=cs['txtbutton_bg_hover'],
                      htxt=cs['text'])
        return colors
class PsychopyScrollbar(wx.ScrollBar):
def __init__(self, parent, ori=wx.VERTICAL):
wx.ScrollBar.__init__(self)
if ori == wx.HORIZONTAL:
style = wx.SB_HORIZONTAL
else:
style = wx.SB_VERTICAL
self.Create(parent, style=style)
self.ori = ori
self.parent = parent
self.Bind(wx.EVT_SCROLL, self.DoScroll)
self.Resize()
def DoScroll(self, event):
if self.ori == wx.HORIZONTAL:
w = event.GetPosition()
h = self.parent.GetScrollPos(wx.VERTICAL)
elif self.ori == wx.VERTICAL:
w = self.parent.GetScrollPos(wx.HORIZONTAL)
h = event.GetPosition()
else:
return
self.parent.Scroll(w, h)
self.Resize()
def Resize(self):
sz = self.parent.GetSize()
vsz = self.parent.GetVirtualSize()
start = self.parent.GetViewStart()
if self.ori == wx.HORIZONTAL:
sz = (sz.GetWidth(), 20)
vsz = vsz.GetWidth()
elif self.ori == wx.VERTICAL:
sz = (20, sz.GetHeight())
vsz = vsz.GetHeight()
self.SetDimensions(start[0], start[1], sz[0], sz[1])
self.SetScrollbar(
position=self.GetScrollPos(self.ori),
thumbSize=10,
range=1,
pageSize=vsz
)
class FrameSwitcher(wx.Menu):
"""Menu for switching between different frames"""
def __init__(self, parent):
wx.Menu.__init__(self)
self.parent = parent
self.app = parent.app
self.itemFrames = {}
# Listen for window switch
self.next = self.Append(wx.ID_MDI_WINDOW_NEXT,
_translate("&Next Window\t%s") % self.app.keys['cycleWindows'],
_translate("&Next Window\t%s") % self.app.keys['cycleWindows'])
self.Bind(wx.EVT_MENU, self.nextWindow, self.next)
self.AppendSeparator()
# Add creator options
self.minItemSpec = [
{'label': "Builder", 'class': psychopy.app.builder.BuilderFrame, 'method': self.app.showBuilder},
{'label': "Coder", 'class': psychopy.app.coder.CoderFrame, 'method': self.app.showCoder},
{'label': "Runner", 'class': psychopy.app.runner.RunnerFrame, 'method': self.app.showRunner},
]
for spec in self.minItemSpec:
if not isinstance(self.Window, spec['class']):
item = self.Append(
wx.ID_ANY, spec['label'], spec['label']
)
self.Bind(wx.EVT_MENU, spec['method'], item)
self.AppendSeparator()
self.updateFrames()
@property
def frames(self):
return self.parent.app.getAllFrames()
def updateFrames(self):
"""Set items according to which windows are open"""
self.next.Enable(len(self.frames) > 1)
# Make new items if needed
for frame in self.frames:
if frame not in self.itemFrames:
if frame.filename:
label = type(frame).__name__.replace("Frame", "") + ": " + os.path.basename(frame.filename)
else:
label = type(frame).__name__.replace("Frame", "")
self.itemFrames[frame] = self.AppendRadioItem(wx.ID_ANY, label, label)
self.Bind(wx.EVT_MENU, self.showFrame, self.itemFrames[frame])
# Edit items to match frames
for frame in self.itemFrames:
item = self.itemFrames[frame]
if not item:
continue
if frame not in self.frames:
# Disable unused items
item.Enable(False)
else:
# Rename item
if frame.filename:
self.itemFrames[frame].SetItemLabel(
type(frame).__name__.replace("Frame", "") + ": " + os.path.basename(frame.filename)
)
else:
self.itemFrames[frame].SetItemLabel(
type(frame).__name__.replace("Frame", "") + ": None"
)
item.Check(frame == self.Window)
self.itemFrames = {key: self.itemFrames[key] for key in self.itemFrames if self.itemFrames[key] is not None}
def showFrame(self, event=None):
itemFrames = event.EventObject.itemFrames
frame = [key for key in itemFrames if itemFrames[key].Id == event.Id][0]
frame.Show(True)
frame.Raise()
self.parent.app.SetTopWindow(frame)
self.updateFrames()
def nextWindow(self, event=None):
"""Cycle through list of open windows"""
current = event.EventObject.Window
i = self.frames.index(current)
while self.frames[i] == current:
i -= 1
self.frames[i].Raise()
self.frames[i].Show()
self.updateFrames()
| gpl-3.0 |
netscaler/horizon | openstack_dashboard/dashboards/project/firewalls/tests.py | 5 | 19627 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang, Big Switch Networks
from mox import IsA # noqa
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from openstack_dashboard import api
from openstack_dashboard.api import fwaas
from openstack_dashboard.test import helpers as test
class FirewallTests(test.TestCase):
class AttributeDict(dict):
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
DASHBOARD = 'project'
INDEX_URL = reverse_lazy('horizon:%s:firewalls:index' % DASHBOARD)
ADDRULE_PATH = 'horizon:%s:firewalls:addrule' % DASHBOARD
ADDPOLICY_PATH = 'horizon:%s:firewalls:addpolicy' % DASHBOARD
ADDFIREWALL_PATH = 'horizon:%s:firewalls:addfirewall' % DASHBOARD
RULE_DETAIL_PATH = 'horizon:%s:firewalls:ruledetails' % DASHBOARD
POLICY_DETAIL_PATH = 'horizon:%s:firewalls:policydetails' % DASHBOARD
FIREWALL_DETAIL_PATH = 'horizon:%s:firewalls:firewalldetails' % DASHBOARD
UPDATERULE_PATH = 'horizon:%s:firewalls:updaterule' % DASHBOARD
UPDATEPOLICY_PATH = 'horizon:%s:firewalls:updatepolicy' % DASHBOARD
UPDATEFIREWALL_PATH = 'horizon:%s:firewalls:updatefirewall' % DASHBOARD
INSERTRULE_PATH = 'horizon:%s:firewalls:insertrule' % DASHBOARD
REMOVERULE_PATH = 'horizon:%s:firewalls:removerule' % DASHBOARD
def set_up_expect(self):
# retrieve rules
rule1 = self.fw_rules.first()
tenant_id = rule1.tenant_id
api.fwaas.rules_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndReturn(self.fw_rules.list())
# retrieves policies
policies = self.fw_policies.list()
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
# retrieves firewalls
firewalls = self.firewalls.list()
api.fwaas.firewalls_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(firewalls)
def set_up_expect_with_exception(self):
rule1 = self.fw_rules.first()
tenant_id = rule1.tenant_id
api.fwaas.rules_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
api.fwaas.policies_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
api.fwaas.firewalls_list(
IsA(http.HttpRequest),
tenant_id=tenant_id).AndRaise(self.exceptions.neutron)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_firewalls(self):
self.set_up_expect()
self.mox.ReplayAll()
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data),
len(self.firewalls.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_policies(self):
self.set_up_expect()
self.mox.ReplayAll()
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['policiestable_table'].data),
len(self.fw_policies.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_rules(self):
self.set_up_expect()
self.mox.ReplayAll()
rule = self.fw_rules.first()
tenant_id = rule.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
tenant_id=tenant_id)
self.assertTemplateUsed(res, '%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['rulestable_table'].data),
len(self.fw_rules.list()))
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_firewalls(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
res = self.client.get(self.INDEX_URL, tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_policies(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__policies',
tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['policiestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('firewalls_list',
'policies_list',
'rules_list')}, )
def test_index_exception_rules(self):
self.set_up_expect_with_exception()
self.mox.ReplayAll()
rule = self.fw_rules.first()
tenant_id = rule.tenant_id
res = self.client.get(self.INDEX_URL + '?tab=fwtabs__rules',
tenant_id=tenant_id)
self.assertTemplateUsed(res,
'%s/firewalls/details_tabs.html'
% self.DASHBOARD)
self.assertTemplateUsed(res,
'horizon/common/_detail_table.html')
self.assertEqual(len(res.context['rulestable_table'].data), 0)
@test.create_stubs({api.fwaas: ('rule_create',), })
def test_add_rule_post(self):
rule1 = self.fw_rules.first()
form_data = {'name': rule1.name,
'description': rule1.description,
'protocol': rule1.protocol,
'action': rule1.action,
'source_ip_address': rule1.source_ip_address,
'source_port': rule1.source_port,
'destination_ip_address': rule1.destination_ip_address,
'destination_port': rule1.destination_port,
'shared': rule1.shared,
'enabled': rule1.enabled
}
api.fwaas.rule_create(
IsA(http.HttpRequest), **form_data).AndReturn(rule1)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_rule_post_with_error(self):
rule1 = self.fw_rules.first()
form_data = {'name': rule1.name,
'description': rule1.description,
'protocol': 'abc',
'action': 'pass',
'source_ip_address': rule1.source_ip_address,
'source_port': rule1.source_port,
'destination_ip_address': rule1.destination_ip_address,
'destination_port': rule1.destination_port,
'shared': rule1.shared,
'enabled': rule1.enabled
}
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDRULE_PATH), form_data)
self.assertFormErrors(res, 2)
@test.create_stubs({api.fwaas: ('policy_create', 'rules_list'), })
def test_add_policy_post(self):
policy = self.fw_policies.first()
rules = self.fw_rules.list()
tenant_id = policy.tenant_id
form_data = {'name': policy.name,
'description': policy.description,
'firewall_rules': policy.firewall_rules,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_create(
IsA(http.HttpRequest), **form_data).AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDPOLICY_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_create', 'rules_list'), })
def test_add_policy_post_with_error(self):
policy = self.fw_policies.first()
rules = self.fw_rules.list()
tenant_id = policy.tenant_id
form_data = {'description': policy.description,
'firewall_rules': None,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDPOLICY_PATH), form_data)
self.assertFormErrors(res, 1)
@test.create_stubs({api.fwaas: ('firewall_create', 'policies_list'), })
def test_add_firewall_post(self):
firewall = self.firewalls.first()
policies = self.fw_policies.list()
tenant_id = firewall.tenant_id
form_data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': firewall.firewall_policy_id,
'shared': firewall.shared,
'admin_state_up': firewall.admin_state_up
}
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
api.fwaas.firewall_create(
IsA(http.HttpRequest), **form_data).AndReturn(firewall)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_create', 'policies_list'), })
def test_add_firewall_post_with_error(self):
firewall = self.firewalls.first()
policies = self.fw_policies.list()
tenant_id = firewall.tenant_id
form_data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': None,
'shared': firewall.shared,
'admin_state_up': firewall.admin_state_up
}
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
self.mox.ReplayAll()
res = self.client.post(reverse(self.ADDFIREWALL_PATH), form_data)
self.assertFormErrors(res, 1)
@test.create_stubs({api.fwaas: ('rule_get', 'rule_update')})
def test_update_rule_post(self):
rule = self.fw_rules.first()
api.fwaas.rule_get(IsA(http.HttpRequest), rule.id).AndReturn(rule)
rule.name = 'new name'
rule.description = 'new desc'
rule.protocol = 'ICMP'
rule.action = 'ALLOW'
rule.shared = False
rule.enabled = True
data = {'name': rule.name,
'description': rule.description,
'protocol': rule.protocol,
'action': rule.action,
'shared': rule.shared,
'enabled': rule.enabled,
'source_ip_address': rule.source_ip_address,
'destination_ip_address': None,
'source_port': None,
'destination_port': rule.destination_port,
}
api.fwaas.rule_update(IsA(http.HttpRequest), rule.id, **data)\
.AndReturn(rule)
self.mox.ReplayAll()
form_data = {'name': rule.name,
'description': rule.description,
'protocol': rule.protocol,
'action': rule.action,
'shared': rule.shared,
'enabled': rule.enabled,
'source_ip_address': rule.source_ip_address,
'destination_ip_address': '',
'source_port': '',
'destination_port': rule.destination_port,
}
res = self.client.post(
reverse(self.UPDATERULE_PATH, args=(rule.id,)), form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_update',
'rules_list')})
def test_update_policy_post(self):
policy = self.fw_policies.first()
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
policy.name = 'new name'
policy.description = 'new desc'
policy.shared = True
policy.audited = False
data = {'name': policy.name,
'description': policy.description,
'shared': policy.shared,
'audited': policy.audited
}
api.fwaas.policy_update(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.UPDATEPOLICY_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('firewall_get', 'policies_list',
'firewall_update')})
def test_update_firewall_post(self):
firewall = self.firewalls.first()
tenant_id = firewall.tenant_id
api.fwaas.firewall_get(IsA(http.HttpRequest),
firewall.id).AndReturn(firewall)
firewall.name = 'new name'
firewall.description = 'new desc'
firewall.admin_state_up = False
data = {'name': firewall.name,
'description': firewall.description,
'firewall_policy_id': firewall.firewall_policy_id,
'admin_state_up': firewall.admin_state_up
}
policies = self.fw_policies.list()
api.fwaas.policies_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(policies)
api.fwaas.firewall_update(IsA(http.HttpRequest), firewall.id, **data)\
.AndReturn(firewall)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.UPDATEFIREWALL_PATH, args=(firewall.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get',
'policy_insert_rule',
'rules_list')})
def test_policy_insert_rule(self):
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
rules = self.fw_rules.list()
new_rule_id = rules[2].id
data = {'firewall_rule_id': new_rule_id,
'insert_before': rules[1].id,
'insert_after': rules[0].id}
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
policy.firewall_rules = [rules[0].id,
new_rule_id,
rules[1].id]
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_insert_rule(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.INSERTRULE_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.fwaas: ('policy_get', 'policy_remove_rule',
'rules_list',)})
def test_policy_remove_rule(self):
policy = self.fw_policies.first()
tenant_id = policy.tenant_id
rules = self.fw_rules.list()
remove_rule_id = policy.firewall_rules[0]
left_rule_id = policy.firewall_rules[1]
data = {'firewall_rule_id': remove_rule_id}
after_remove_policy_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'tenant_id': '1',
'name': 'policy1',
'description': 'policy description',
'firewall_rules': [left_rule_id],
'audited': True,
'shared': True}
after_remove_policy = fwaas.Policy(after_remove_policy_dict)
api.fwaas.policy_get(IsA(http.HttpRequest),
policy.id).AndReturn(policy)
api.fwaas.rules_list(
IsA(http.HttpRequest), tenant_id=tenant_id).AndReturn(rules)
api.fwaas.policy_remove_rule(IsA(http.HttpRequest), policy.id, **data)\
.AndReturn(after_remove_policy)
self.mox.ReplayAll()
res = self.client.post(
reverse(self.REMOVERULE_PATH, args=(policy.id,)), data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
| apache-2.0 |
foursquare/pants | src/python/pants/backend/jvm/tasks/jvm_compile/execution_graph.py | 1 | 11698 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import queue
import sys
import threading
import traceback
from builtins import map, object, str
from collections import defaultdict, deque
from heapq import heappop, heappush
from pants.base.worker_pool import Work
class Job(object):
"""A unit of scheduling for the ExecutionGraph.
  The ExecutionGraph represents a DAG of dependent work. A Job is a node in the graph along with the
keys of its dependent jobs.
"""
def __init__(self, key, fn, dependencies, size=0, on_success=None, on_failure=None):
"""
:param key: Key used to reference and look up jobs
:param fn callable: The work to perform
:param dependencies: List of keys for dependent jobs
:param size: Estimated job size used for prioritization
:param on_success: Zero parameter callback to run if job completes successfully. Run on main
thread.
    :param on_failure: Zero parameter callback to run if the job fails. Run on main
thread."""
self.key = key
self.fn = fn
self.dependencies = dependencies
self.size = size
self.on_success = on_success
self.on_failure = on_failure
def __call__(self):
self.fn()
def run_success_callback(self):
if self.on_success:
self.on_success()
def run_failure_callback(self):
if self.on_failure:
self.on_failure()
UNSTARTED = 'Unstarted'
QUEUED = 'Queued'
SUCCESSFUL = 'Successful'
FAILED = 'Failed'
CANCELED = 'Canceled'
class StatusTable(object):
DONE_STATES = {SUCCESSFUL, FAILED, CANCELED}
def __init__(self, keys, pending_dependencies_count):
self._statuses = {key: UNSTARTED for key in keys}
self._pending_dependencies_count = pending_dependencies_count
def mark_as(self, state, key):
self._statuses[key] = state
def mark_queued(self, key):
self.mark_as(QUEUED, key)
def unfinished_items(self):
"""Returns a list of (name, status) tuples, only including entries marked as unfinished."""
return [(key, stat) for key, stat in self._statuses.items() if stat not in self.DONE_STATES]
def failed_keys(self):
return [key for key, stat in self._statuses.items() if stat == FAILED]
def is_unstarted(self, key):
return self._statuses.get(key) is UNSTARTED
def mark_one_successful_dependency(self, key):
self._pending_dependencies_count[key] -= 1
def is_ready_to_submit(self, key):
return self.is_unstarted(key) and self._pending_dependencies_count[key] == 0
def are_all_done(self):
return all(s in self.DONE_STATES for s in self._statuses.values())
def has_failures(self):
return any(stat is FAILED for stat in self._statuses.values())
class ExecutionFailure(Exception):
"""Raised when work units fail during execution"""
def __init__(self, message, cause=None):
if cause:
message = "{}: {}".format(message, str(cause))
super(ExecutionFailure, self).__init__(message)
self.cause = cause
class UnexecutableGraphError(Exception):
"""Base exception class for errors that make an ExecutionGraph not executable"""
def __init__(self, msg):
super(UnexecutableGraphError, self).__init__("Unexecutable graph: {}".format(msg))
class NoRootJobError(UnexecutableGraphError):
def __init__(self):
super(NoRootJobError, self).__init__(
"All scheduled jobs have dependencies. There must be a circular dependency.")
class UnknownJobError(UnexecutableGraphError):
def __init__(self, undefined_dependencies):
super(UnknownJobError, self).__init__("Undefined dependencies {}"
.format(", ".join(map(repr, undefined_dependencies))))
class JobExistsError(UnexecutableGraphError):
def __init__(self, key):
super(JobExistsError, self).__init__("Job already scheduled {!r}"
.format(key))
class ThreadSafeCounter(object):
def __init__(self):
self.lock = threading.Lock()
self._counter = 0
def get(self):
with self.lock:
return self._counter
def increment(self):
with self.lock:
self._counter += 1
def decrement(self):
with self.lock:
self._counter -= 1
class ExecutionGraph(object):
"""A directed acyclic graph of work to execute.
This is currently only used within jvm compile, but the intent is to unify it with the future
global execution graph.
"""
def __init__(self, job_list, print_stack_trace):
"""
:param job_list Job: list of Jobs to schedule and run.
"""
self._print_stack_trace = print_stack_trace
self._dependencies = defaultdict(list)
self._dependees = defaultdict(list)
self._jobs = {}
self._job_keys_as_scheduled = []
self._job_keys_with_no_dependencies = []
for job in job_list:
self._schedule(job)
unscheduled_dependencies = set(self._dependees.keys()) - set(self._job_keys_as_scheduled)
if unscheduled_dependencies:
raise UnknownJobError(unscheduled_dependencies)
if len(self._job_keys_with_no_dependencies) == 0:
raise NoRootJobError()
self._job_priority = self._compute_job_priorities(job_list)
def format_dependee_graph(self):
return "\n".join([
"{} -> {{\n {}\n}}".format(key, ',\n '.join(self._dependees[key]))
for key in self._job_keys_as_scheduled
])
def _schedule(self, job):
key = job.key
dependency_keys = job.dependencies
self._job_keys_as_scheduled.append(key)
if key in self._jobs:
raise JobExistsError(key)
self._jobs[key] = job
if len(dependency_keys) == 0:
self._job_keys_with_no_dependencies.append(key)
self._dependencies[key] = dependency_keys
for dependency_key in dependency_keys:
self._dependees[dependency_key].append(key)
def _compute_job_priorities(self, job_list):
"""Walks the dependency graph breadth-first, starting from the most dependent tasks,
    and computes the job priority as the sum of the job sizes along the critical path."""
job_size = {job.key: job.size for job in job_list}
job_priority = defaultdict(int)
bfs_queue = deque()
for job in job_list:
if len(self._dependees[job.key]) == 0:
job_priority[job.key] = job_size[job.key]
bfs_queue.append(job.key)
satisfied_dependees_count = defaultdict(int)
while len(bfs_queue) > 0:
job_key = bfs_queue.popleft()
for dependency_key in self._dependencies[job_key]:
job_priority[dependency_key] = \
max(job_priority[dependency_key],
job_size[dependency_key] + job_priority[job_key])
satisfied_dependees_count[dependency_key] += 1
if satisfied_dependees_count[dependency_key] == len(self._dependees[dependency_key]):
bfs_queue.append(dependency_key)
return job_priority
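  # Editor's illustrative note, not in the original source: for a chain where
  # job c depends on b and b depends on a, with sizes a=1, b=2, c=4, the walk
  # starts at c (it has no dependees), giving priority(c)=4, then
  # priority(b)=2+4=6 and priority(a)=1+6=7, so jobs earlier on the critical
  # path are popped from the scheduling heap first.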
def execute(self, pool, log):
"""Runs scheduled work, ensuring all dependencies for each element are done before execution.
:param pool: A WorkerPool to run jobs on
:param log: logger for logging debug information and progress
submits all the work without any dependencies to the worker pool
when a unit of work finishes,
if it is successful
calls success callback
checks for dependees whose dependencies are all successful, and submits them
if it fails
calls failure callback
marks dependees as failed and queues them directly into the finished work queue
when all work is either successful or failed,
cleans up the work pool
if there's an exception on the main thread,
calls failure callback for unfinished work
aborts work pool
re-raises
"""
log.debug(self.format_dependee_graph())
status_table = StatusTable(self._job_keys_as_scheduled,
{key: len(self._jobs[key].dependencies) for key in self._job_keys_as_scheduled})
finished_queue = queue.Queue()
heap = []
jobs_in_flight = ThreadSafeCounter()
def put_jobs_into_heap(job_keys):
for job_key in job_keys:
# minus because jobs with larger priority should go first
heappush(heap, (-self._job_priority[job_key], job_key))
def try_to_submit_jobs_from_heap():
def worker(worker_key, work):
try:
work()
result = (worker_key, SUCCESSFUL, None)
except Exception:
_, exc_value, exc_traceback = sys.exc_info()
result = (worker_key, FAILED, (exc_value, traceback.format_tb(exc_traceback)))
finished_queue.put(result)
jobs_in_flight.decrement()
while len(heap) > 0 and jobs_in_flight.get() < pool.num_workers:
priority, job_key = heappop(heap)
jobs_in_flight.increment()
status_table.mark_queued(job_key)
pool.submit_async_work(Work(worker, [(job_key, (self._jobs[job_key]))]))
def submit_jobs(job_keys):
put_jobs_into_heap(job_keys)
try_to_submit_jobs_from_heap()
try:
submit_jobs(self._job_keys_with_no_dependencies)
while not status_table.are_all_done():
try:
finished_key, result_status, value = finished_queue.get(timeout=10)
except queue.Empty:
log.debug("Waiting on \n {}\n".format("\n ".join(
"{}: {}".format(key, state) for key, state in status_table.unfinished_items())))
try_to_submit_jobs_from_heap()
continue
finished_job = self._jobs[finished_key]
direct_dependees = self._dependees[finished_key]
status_table.mark_as(result_status, finished_key)
# Queue downstream tasks.
if result_status is SUCCESSFUL:
try:
finished_job.run_success_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_success for {}".format(finished_key), e)
ready_dependees = []
for dependee in direct_dependees:
status_table.mark_one_successful_dependency(dependee)
if status_table.is_ready_to_submit(dependee):
ready_dependees.append(dependee)
submit_jobs(ready_dependees)
else: # Failed or canceled.
try:
finished_job.run_failure_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_failure for {}".format(finished_key), e)
# Propagate failures downstream.
for dependee in direct_dependees:
if status_table.is_unstarted(dependee):
status_table.mark_queued(dependee)
finished_queue.put((dependee, CANCELED, None))
# Log success or failure for this job.
if result_status is FAILED:
exception, tb = value
log.error("{} failed: {}".format(finished_key, exception))
if self._print_stack_trace:
log.error('Traceback:\n{}'.format('\n'.join(tb)))
else:
log.debug("{} finished with status {}".format(finished_key, result_status))
except ExecutionFailure:
raise
except Exception as e:
# Call failure callbacks for jobs that are unfinished.
for key, state in status_table.unfinished_items():
self._jobs[key].run_failure_callback()
log.debug(traceback.format_exc())
raise ExecutionFailure("Error running job", e)
if status_table.has_failures():
raise ExecutionFailure("Failed jobs: {}".format(', '.join(status_table.failed_keys())))
| apache-2.0 |
foursquare/pants | tests/python/pants_test/backend/jvm/tasks/test_jvmdoc_gen.py | 2 | 1845 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.tasks.jvmdoc_gen import Jvmdoc, JvmdocGen
from pants.base.exceptions import TaskError
from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
dummydoc = Jvmdoc(tool_name='dummydoc', product_type='dummydoc')
class DummyJvmdocGen(JvmdocGen):
@classmethod
def jvmdoc(cls):
return dummydoc
def execute(self):
self.generate_doc(lambda t: True, create_dummydoc_command)
def create_dummydoc_command(classpath, gendir, *targets):
# here we need to test that we get the expected classpath
pass
class JvmdocGenTest(JvmTaskTestBase):
"""Test some base functionality in JvmdocGen."""
@classmethod
def task_type(cls):
return DummyJvmdocGen
def setUp(self):
super(JvmdocGenTest, self).setUp()
self.t1 = self.make_target('t1')
context = self.context(target_roots=[self.t1])
self.targets = context.targets()
self.populate_runtime_classpath(context)
self.task = self.create_task(context)
def test_classpath(self):
self.task.execute()
def test_generate(self):
def create_jvmdoc_command_fail(classpath, gendir, *targets):
return ['python', os.path.join(os.path.dirname(__file__), "false.py")]
def create_jvmdoc_command_succeed(classpath, gendir, *targets):
return ['python', os.path.join(os.path.dirname(__file__), "true.py")]
for generate in [self.task._generate_individual,
self.task._generate_combined]:
with self.assertRaises(TaskError):
generate(self.targets, create_jvmdoc_command_fail)
generate(self.targets, create_jvmdoc_command_succeed)
| apache-2.0 |
xyuanmu/XX-Net | code/default/x_tunnel/local/cloudfront_front/web_control.py | 4 | 5336 | #!/usr/bin/env python
# coding:utf-8
import os
import time
import urllib.parse
import simple_http_server
from .front import front
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, os.pardir))
web_ui_path = os.path.join(current_path, os.path.pardir, "web_ui")
class ControlHandler(simple_http_server.HttpServerHandler):
def __init__(self, client_address, headers, command, path, rfile, wfile):
self.client_address = client_address
self.headers = headers
self.command = command
self.path = path
self.rfile = rfile
self.wfile = wfile
def do_GET(self):
path = urllib.parse.urlparse(self.path).path
if path == "/log":
return self.req_log_handler()
elif path == "/ip_list":
return self.req_ip_list_handler()
elif path == "/debug":
return self.req_debug_handler()
else:
front.logger.warn('Control Req %s %s %s ', self.address_string(), self.command, self.path)
self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
front.logger.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
def req_log_handler(self):
req = urllib.parse.urlparse(self.path).query
reqs = urllib.parse.parse_qs(req, keep_blank_values=True)
data = ''
cmd = "get_last"
if reqs["cmd"]:
cmd = reqs["cmd"][0]
if cmd == "get_last":
max_line = int(reqs["max_line"][0])
data = front.logger.get_last_lines(max_line)
elif cmd == "get_new":
last_no = int(reqs["last_no"][0])
data = front.logger.get_new_lines(last_no)
else:
front.logger.error('PAC %s %s %s ', self.address_string(), self.command, self.path)
mimetype = 'text/plain'
self.send_response_nc(mimetype, data)
def req_ip_list_handler(self):
time_now = time.time()
data = "<html><body><div style='float: left; white-space:nowrap;font-family: monospace;'>"
data += "time:%d pointer:%d<br>\r\n" % (time_now, front.ip_manager.ip_pointer)
data += "<table><tr><th>N</th><th>IP</th><th>HS</th><th>Fails</th>"
data += "<th>down_fail</th><th>links</th>"
data += "<th>get_time</th><th>success_time</th><th>fail_time</th><th>down_fail_time</th>"
data += "<th>data_active</th><th>transfered_data</th><th>Trans</th>"
data += "<th>history</th></tr>\n"
i = 1
for ip in front.ip_manager.ip_list:
handshake_time = front.ip_manager.ip_dict[ip]["handshake_time"]
fail_times = front.ip_manager.ip_dict[ip]["fail_times"]
down_fail = front.ip_manager.ip_dict[ip]["down_fail"]
links = front.ip_manager.ip_dict[ip]["links"]
get_time = front.ip_manager.ip_dict[ip]["get_time"]
if get_time:
get_time = time_now - get_time
success_time = front.ip_manager.ip_dict[ip]["success_time"]
if success_time:
success_time = time_now - success_time
fail_time = front.ip_manager.ip_dict[ip]["fail_time"]
if fail_time:
fail_time = time_now - fail_time
down_fail_time = front.ip_manager.ip_dict[ip]["down_fail_time"]
if down_fail_time:
down_fail_time = time_now - down_fail_time
data_active = front.ip_manager.ip_dict[ip]["data_active"]
if data_active:
active_time = time_now - data_active
else:
active_time = 0
history = front.ip_manager.ip_dict[ip]["history"]
t0 = 0
str_out = ''
for item in history:
t = item[0]
v = item[1]
if t0 == 0:
t0 = t
time_per = int((t - t0) * 1000)
t0 = t
str_out += "%d(%s) " % (time_per, v)
data += "<tr><td>%d</td><td>%s</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td>" \
"<td>%d</td><td>%d</td><td>%s</td></tr>\n" % \
(i, ip, handshake_time, fail_times, down_fail, links, get_time, success_time, fail_time, down_fail_time, \
active_time, str_out)
i += 1
data += "</table></div></body></html>"
mimetype = 'text/html'
self.send_response_nc(mimetype, data)
def req_debug_handler(self):
data = ""
objs = [front.connect_manager] + list(front.dispatchs.values())
for obj in objs:
data += "%s\r\n" % obj.__class__
for attr in dir(obj):
if attr.startswith("__"):
continue
sub_obj = getattr(obj, attr)
if callable(sub_obj):
continue
data += " %s = %s\r\n" % (attr, sub_obj)
if hasattr(obj, "to_string"):
data += obj.to_string()
mimetype = 'text/plain'
self.send_response_nc(mimetype, data) | bsd-2-clause |
lz1988/company-site | django/contrib/sites/management.py | 232 | 1587 | """
Creates the default Site object.
"""
from django.db.models import signals
from django.db import connections
from django.db import router
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_app
from django.core.management.color import no_style
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
    if Site in created_models and router.allow_syncdb(db, Site):
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=1, domain="example.com", name="example.com").save(using=db)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[db].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
cursor = connections[db].cursor()
for command in sequence_sql:
cursor.execute(command)
Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
| bsd-3-clause |
ruuk/script.module.youtube.dl | lib/youtube_dl/extractor/urort.py | 64 | 2249 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
unified_strdate,
)
class UrortIE(InfoExtractor):
IE_DESC = 'NRK P3 Urørt'
_VALID_URL = r'https?://(?:www\.)?urort\.p3\.no/#!/Band/(?P<id>[^/]+)$'
_TEST = {
'url': 'https://urort.p3.no/#!/Band/Gerilja',
'md5': '5ed31a924be8a05e47812678a86e127b',
'info_dict': {
'id': '33124-24',
'ext': 'mp3',
'title': 'The Bomb',
'thumbnail': r're:^https?://.+\.jpg',
'uploader': 'Gerilja',
'uploader_id': 'Gerilja',
'upload_date': '20100323',
},
'params': {
'matchtitle': '^The Bomb$', # To test, we want just one video
}
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
songs = self._download_json(json_url, playlist_id)
entries = []
for s in songs:
formats = [{
'tbr': f.get('Quality'),
'ext': f['FileType'],
'format_id': '%s-%s' % (f['FileType'], f.get('Quality', '')),
'url': 'http://p3urort.blob.core.windows.net/tracks/%s' % f['FileRef'],
'preference': 3 if f['FileType'] == 'mp3' else 2,
} for f in s['Files']]
self._sort_formats(formats)
e = {
'id': '%d-%s' % (s['BandId'], s['$id']),
'title': s['Title'],
'uploader_id': playlist_id,
'uploader': s.get('BandName', playlist_id),
'thumbnail': 'http://urort.p3.no/cloud/images/%s' % s['Image'],
'upload_date': unified_strdate(s.get('Released')),
'formats': formats,
}
entries.append(e)
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_id,
'entries': entries,
}
| gpl-2.0 |
sdopoku/flask-blog | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
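# Hedged usage sketch added for illustration; not part of the original charade
# module. Feed a byte string to the prober and read back its state and confidence.
def _example_utf8_probe(byte_string):
    prober = UTF8Prober()
    state = prober.feed(byte_string)
    return state, prober.get_confidence()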
| gpl-2.0 |
Ted1993/Flasky | venv/lib/python2.7/site-packages/mako/ext/turbogears.py | 39 | 2132 | # ext/turbogears.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
"""TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'):
self.extra_vars_func = extra_vars_func
self.extension = extension
if not options:
options = {}
# Pull the options out and initialize the lookup
lookup_options = {}
for k, v in options.items():
if k.startswith('mako.'):
lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {}
# transfer lookup args to template args, based on those available
# in getargspec
for kw in compat.inspect_getargspec(Template.__init__)[0]:
if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw]
def load_template(self, templatename, template_string=None):
"""Loads a template from a file or a string"""
if template_string is not None:
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template
return self.lookup.get_template(templatename)
def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, compat.string_types):
template = self.load_template(template)
# Load extra vars func if provided
if self.extra_vars_func:
info.update(self.extra_vars_func())
return template.render(**info)
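# Hedged usage sketch added for illustration; not part of the original Mako
# module. The options dict and template variables are placeholders only.
def _example_tg_plugin():
    plugin = TGPlugin(options={'mako.directories': ['/tmp/templates']})
    template = plugin.load_template(None, template_string='Hello, ${name}!')
    return plugin.render({'name': 'world'}, template=template)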
| mit |
telerik/cloudbase-init | cloudbaseinit/openstack/common/notifier/rabbit_notifier.py | 1 | 1108 | # Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
def notify(context, message):
"""Deprecated in Grizzly. Please use rpc_notifier instead."""
LOG.deprecated(_("The rabbit_notifier is now deprecated."
" Please use rpc_notifier instead."))
rpc_notifier.notify(context, message)
| apache-2.0 |
AlexBoogaard/Sick-Beard-Torrent-Edition | lib/unidecode/x0b5.py | 253 | 4919 | data = (
'dyil', # 0x00
'dyilg', # 0x01
'dyilm', # 0x02
'dyilb', # 0x03
'dyils', # 0x04
'dyilt', # 0x05
'dyilp', # 0x06
'dyilh', # 0x07
'dyim', # 0x08
'dyib', # 0x09
'dyibs', # 0x0a
'dyis', # 0x0b
'dyiss', # 0x0c
'dying', # 0x0d
'dyij', # 0x0e
'dyic', # 0x0f
'dyik', # 0x10
'dyit', # 0x11
'dyip', # 0x12
'dyih', # 0x13
'di', # 0x14
'dig', # 0x15
'digg', # 0x16
'digs', # 0x17
'din', # 0x18
'dinj', # 0x19
'dinh', # 0x1a
'did', # 0x1b
'dil', # 0x1c
'dilg', # 0x1d
'dilm', # 0x1e
'dilb', # 0x1f
'dils', # 0x20
'dilt', # 0x21
'dilp', # 0x22
'dilh', # 0x23
'dim', # 0x24
'dib', # 0x25
'dibs', # 0x26
'dis', # 0x27
'diss', # 0x28
'ding', # 0x29
'dij', # 0x2a
'dic', # 0x2b
'dik', # 0x2c
'dit', # 0x2d
'dip', # 0x2e
'dih', # 0x2f
'dda', # 0x30
'ddag', # 0x31
'ddagg', # 0x32
'ddags', # 0x33
'ddan', # 0x34
'ddanj', # 0x35
'ddanh', # 0x36
'ddad', # 0x37
'ddal', # 0x38
'ddalg', # 0x39
'ddalm', # 0x3a
'ddalb', # 0x3b
'ddals', # 0x3c
'ddalt', # 0x3d
'ddalp', # 0x3e
'ddalh', # 0x3f
'ddam', # 0x40
'ddab', # 0x41
'ddabs', # 0x42
'ddas', # 0x43
'ddass', # 0x44
'ddang', # 0x45
'ddaj', # 0x46
'ddac', # 0x47
'ddak', # 0x48
'ddat', # 0x49
'ddap', # 0x4a
'ddah', # 0x4b
'ddae', # 0x4c
'ddaeg', # 0x4d
'ddaegg', # 0x4e
'ddaegs', # 0x4f
'ddaen', # 0x50
'ddaenj', # 0x51
'ddaenh', # 0x52
'ddaed', # 0x53
'ddael', # 0x54
'ddaelg', # 0x55
'ddaelm', # 0x56
'ddaelb', # 0x57
'ddaels', # 0x58
'ddaelt', # 0x59
'ddaelp', # 0x5a
'ddaelh', # 0x5b
'ddaem', # 0x5c
'ddaeb', # 0x5d
'ddaebs', # 0x5e
'ddaes', # 0x5f
'ddaess', # 0x60
'ddaeng', # 0x61
'ddaej', # 0x62
'ddaec', # 0x63
'ddaek', # 0x64
'ddaet', # 0x65
'ddaep', # 0x66
'ddaeh', # 0x67
'ddya', # 0x68
'ddyag', # 0x69
'ddyagg', # 0x6a
'ddyags', # 0x6b
'ddyan', # 0x6c
'ddyanj', # 0x6d
'ddyanh', # 0x6e
'ddyad', # 0x6f
'ddyal', # 0x70
'ddyalg', # 0x71
'ddyalm', # 0x72
'ddyalb', # 0x73
'ddyals', # 0x74
'ddyalt', # 0x75
'ddyalp', # 0x76
'ddyalh', # 0x77
'ddyam', # 0x78
'ddyab', # 0x79
'ddyabs', # 0x7a
'ddyas', # 0x7b
'ddyass', # 0x7c
'ddyang', # 0x7d
'ddyaj', # 0x7e
'ddyac', # 0x7f
'ddyak', # 0x80
'ddyat', # 0x81
'ddyap', # 0x82
'ddyah', # 0x83
'ddyae', # 0x84
'ddyaeg', # 0x85
'ddyaegg', # 0x86
'ddyaegs', # 0x87
'ddyaen', # 0x88
'ddyaenj', # 0x89
'ddyaenh', # 0x8a
'ddyaed', # 0x8b
'ddyael', # 0x8c
'ddyaelg', # 0x8d
'ddyaelm', # 0x8e
'ddyaelb', # 0x8f
'ddyaels', # 0x90
'ddyaelt', # 0x91
'ddyaelp', # 0x92
'ddyaelh', # 0x93
'ddyaem', # 0x94
'ddyaeb', # 0x95
'ddyaebs', # 0x96
'ddyaes', # 0x97
'ddyaess', # 0x98
'ddyaeng', # 0x99
'ddyaej', # 0x9a
'ddyaec', # 0x9b
'ddyaek', # 0x9c
'ddyaet', # 0x9d
'ddyaep', # 0x9e
'ddyaeh', # 0x9f
'ddeo', # 0xa0
'ddeog', # 0xa1
'ddeogg', # 0xa2
'ddeogs', # 0xa3
'ddeon', # 0xa4
'ddeonj', # 0xa5
'ddeonh', # 0xa6
'ddeod', # 0xa7
'ddeol', # 0xa8
'ddeolg', # 0xa9
'ddeolm', # 0xaa
'ddeolb', # 0xab
'ddeols', # 0xac
'ddeolt', # 0xad
'ddeolp', # 0xae
'ddeolh', # 0xaf
'ddeom', # 0xb0
'ddeob', # 0xb1
'ddeobs', # 0xb2
'ddeos', # 0xb3
'ddeoss', # 0xb4
'ddeong', # 0xb5
'ddeoj', # 0xb6
'ddeoc', # 0xb7
'ddeok', # 0xb8
'ddeot', # 0xb9
'ddeop', # 0xba
'ddeoh', # 0xbb
'dde', # 0xbc
'ddeg', # 0xbd
'ddegg', # 0xbe
'ddegs', # 0xbf
'dden', # 0xc0
'ddenj', # 0xc1
'ddenh', # 0xc2
'dded', # 0xc3
'ddel', # 0xc4
'ddelg', # 0xc5
'ddelm', # 0xc6
'ddelb', # 0xc7
'ddels', # 0xc8
'ddelt', # 0xc9
'ddelp', # 0xca
'ddelh', # 0xcb
'ddem', # 0xcc
'ddeb', # 0xcd
'ddebs', # 0xce
'ddes', # 0xcf
'ddess', # 0xd0
'ddeng', # 0xd1
'ddej', # 0xd2
'ddec', # 0xd3
'ddek', # 0xd4
'ddet', # 0xd5
'ddep', # 0xd6
'ddeh', # 0xd7
'ddyeo', # 0xd8
'ddyeog', # 0xd9
'ddyeogg', # 0xda
'ddyeogs', # 0xdb
'ddyeon', # 0xdc
'ddyeonj', # 0xdd
'ddyeonh', # 0xde
'ddyeod', # 0xdf
'ddyeol', # 0xe0
'ddyeolg', # 0xe1
'ddyeolm', # 0xe2
'ddyeolb', # 0xe3
'ddyeols', # 0xe4
'ddyeolt', # 0xe5
'ddyeolp', # 0xe6
'ddyeolh', # 0xe7
'ddyeom', # 0xe8
'ddyeob', # 0xe9
'ddyeobs', # 0xea
'ddyeos', # 0xeb
'ddyeoss', # 0xec
'ddyeong', # 0xed
'ddyeoj', # 0xee
'ddyeoc', # 0xef
'ddyeok', # 0xf0
'ddyeot', # 0xf1
'ddyeop', # 0xf2
'ddyeoh', # 0xf3
'ddye', # 0xf4
'ddyeg', # 0xf5
'ddyegg', # 0xf6
'ddyegs', # 0xf7
'ddyen', # 0xf8
'ddyenj', # 0xf9
'ddyenh', # 0xfa
'ddyed', # 0xfb
'ddyel', # 0xfc
'ddyelg', # 0xfd
'ddyelm', # 0xfe
'ddyelb', # 0xff
)
| gpl-3.0 |
DazWorrall/ansible | lib/ansible/modules/network/avi/avi_snmptrapprofile.py | 27 | 3396 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_snmptrapprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SnmpTrapProfile Avi RESTful Object
description:
- This module is used to configure SnmpTrapProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- A user-friendly name of the snmp trap configuration.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
trap_servers:
description:
- The ip address or hostname of the snmp trap destination server.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the snmp trap profile object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SnmpTrapProfile object
avi_snmptrapprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_snmptrapprofile
"""
RETURN = '''
obj:
description: SnmpTrapProfile (api/snmptrapprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
trap_servers=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'snmptrapprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
quodlibet/quodlibet | tests/test_pattern.py | 4 | 24758 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from senf import fsnative
from tests import TestCase
from quodlibet.formats import AudioFile
from quodlibet.pattern import (FileFromPattern, XMLFromPattern, Pattern,
XMLFromMarkupPattern, ArbitraryExtensionFileFromPattern)
class _TPattern(TestCase):
def setUp(self):
s1 = {'tracknumber': u'5/6', 'artist': u'Artist', 'title': u'Title5',
'~filename': '/path/to/a.mp3', 'xmltest': u"<&>"}
s2 = {'tracknumber': u'6', 'artist': u'Artist', 'title': u'Title6',
'~filename': '/path/to/b.ogg', 'discnumber': u'2',
'unislash': u"foo\uff0fbar"}
s3 = {'title': u'test/subdir', 'genre': u'/\n/',
'~filename': '/one/more/a.flac', 'version': u'Instrumental'}
s4 = {'performer': u'a\nb', 'artist': u'foo\nbar'}
s5 = {'tracknumber': u'7/1234', 'artist': u'Artist',
'title': u'Title7', '~filename': '/path/to/e.mp3'}
s6 = {'artist': u'Foo', 'albumartist': u'foo.bar', 'album': u'Best Of',
'~filename': '/path/to/f.mp3', 'title': u'The.Final.Word'}
s7 = {'artist': u'un élève français', '~filename': '/path/to/g.mp3',
'albumartist': u'Lee "Scratch" Perry',
'album': u"The 'only' way!", 'comment': u'Trouble|Strife'}
s8 = {'tracknumber': u'7/8', 'artist': u'Artist1\n\nArtist3',
'artistsort': u'SortA1\nSortA2',
'album': u'Album5', 'albumsort': u'SortAlbum5',
'~filename': '/path/to/g.mp3', 'xmltest': u"<&>"}
if os.name == "nt":
s1["~filename"] = u"C:\\path\\to\\a.mp3"
s2["~filename"] = u"C:\\path\\to\\b.ogg"
s3["~filename"] = u"C:\\one\\more\\a.flac"
s4["~filename"] = u"C:\\path\\to\\a.mp3"
s5["~filename"] = u"C:\\path\\to\\a.mp3"
s6["~filename"] = u"C:\\path\\to\\f.mp3"
s7["~filename"] = u"C:\\path\\to\\g.mp3"
s8["~filename"] = u"C:\\path\\to\\h.mp3"
self.a = AudioFile(s1)
self.b = AudioFile(s2)
self.c = AudioFile(s3)
self.d = AudioFile(s4)
self.e = AudioFile(s5)
self.f = AudioFile(s6)
self.g = AudioFile(s7)
self.h = AudioFile(s8)
class TPattern(_TPattern):
from quodlibet.formats import AudioFile
AudioFile
def test_numeric(self):
pat = Pattern("<~#rating>")
self.assertEqual(pat.format(self.a), "0.50")
def test_space(self):
pat = Pattern("a ")
self.assertEqual(pat.format(self.a), "a ")
pat = Pattern(" a")
self.assertEqual(pat.format(self.a), " a")
pat = Pattern("a\n\n")
self.assertEqual(pat.format(self.a), "a\n\n")
def test_escape(self):
pat = Pattern("a \\<foo\\|bla\\>")
self.assertEqual(pat.format(self.a), "a <foo|bla>")
pat = Pattern(r"a\\<foo>")
self.assertEqual(pat.format(self.a), "a\\")
def test_query_like_tag(self):
pat = Pattern("<t=v>")
self.assertEqual(pat.format(AudioFile({"t=v": "foo"})), "foo")
def test_conditional_number_dot_title(s):
pat = Pattern('<tracknumber|<tracknumber>. ><title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), 'test/subdir')
def test_conditional_other_number_dot_title(s):
pat = Pattern('<tracknumber|<tracknumber>|00>. <title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), '00. test/subdir')
def test_conditional_other_other(s):
# FIXME: was <tracknumber|a|b|c>.. but we can't put <>| in the format
# string since it would break the XML pattern formatter.
s.assertEqual(Pattern('<tracknumber|a|b|c>').format(s.a), "")
def test_conditional_genre(s):
pat = Pattern('<genre|<genre>|music>')
s.assertEquals(pat.format(s.a), 'music')
s.assertEquals(pat.format(s.b), 'music')
s.assertEquals(pat.format(s.c), '/, /')
def test_conditional_unknown(s):
pat = Pattern('<album|foo|bar>')
s.assertEquals(pat.format(s.a), 'bar')
def test_conditional_equals(s):
pat = Pattern('<artist=Artist|matched|not matched>')
s.assertEquals(pat.format(s.a), 'matched')
pat = Pattern('<artist=Artistic|matched|not matched>')
s.assertEquals(pat.format(s.a), 'not matched')
def test_conditional_equals_unicode(s):
pat = Pattern(u'<artist=Artist|matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern(u'<artist=un élève français|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_duplicate_query(self):
pat = Pattern('<u=yes|<u=yes|x|y>|<u=yes|q|z>>')
self.assertEqual(pat.format(AudioFile({"u": u"yes"})), "x")
self.assertEqual(pat.format(AudioFile({"u": u"no"})), "z")
def test_tag_query_escaping(s):
pat = Pattern('<albumartist=Lee "Scratch" Perry|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_escaped_pipe(s):
pat = Pattern(r'<albumartist=/Lee\|Bob/|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern(r'<albumartist=\||matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern(r'<comment=/Trouble\|Strife/|matched|not matched>')
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_quoting(s):
pat = Pattern('<album=The only way|matched|not matched>')
s.assertEquals(pat.format(s.g), 'not matched')
pat = Pattern("<album=\"The 'only' way!\"|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
def test_tag_query_regex(s):
pat = Pattern("<album=/'only'/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern("<album=/The .+ way/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'matched')
pat = Pattern("</The .+ way/|matched|not matched>")
s.assertEquals(pat.format(s.g), 'not matched')
def test_tag_internal(self):
if os.name != "nt":
pat = Pattern("<~filename='/path/to/a.mp3'|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
pat = Pattern(
"<~filename=/\\/path\\/to\\/a.mp3/|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
else:
pat = Pattern(
r"<~filename='C:\\\path\\\to\\\a.mp3'|matched|not matched>")
self.assertEquals(pat.format(self.a), 'matched')
def test_tag_query_disallowed_free_text(s):
pat = Pattern("<The only way|matched|not matched>")
s.assertEquals(pat.format(s.g), 'not matched')
def test_query_scope(self):
pat = Pattern("<foo|<artist=Foo|x|y>|<artist=Foo|z|q>>")
self.assertEqual(pat.format(self.f), "z")
def test_query_numeric(self):
pat = Pattern("<#(foo=42)|42|other>")
self.assertEqual(pat.format(AudioFile()), "other")
self.assertEqual(pat.format(AudioFile({"foo": "42"})), "42")
def test_conditional_notfile(s):
pat = Pattern('<tracknumber|<tracknumber>|00>')
s.assertEquals(pat.format(s.a), '5/6')
s.assertEquals(pat.format(s.b), '6')
s.assertEquals(pat.format(s.c), '00')
def test_conditional_subdir(s):
pat = Pattern('/a<genre|/<genre>>/<title>')
s.assertEquals(pat.format(s.a), '/a/Title5')
s.assertEquals(pat.format(s.b), '/a/Title6')
s.assertEquals(pat.format(s.c), '/a//, //test/subdir')
def test_number_dot_title(s):
pat = Pattern('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '5/6. Title5')
s.assertEquals(pat.format(s.b), '6. Title6')
s.assertEquals(pat.format(s.c), '. test/subdir')
def test_recnumber_dot_title(s):
pat = Pattern(r'\<<tracknumber>\>. <title>')
s.assertEquals(pat.format(s.a), '<5/6>. Title5')
s.assertEquals(pat.format(s.b), '<6>. Title6')
s.assertEquals(pat.format(s.c), '<>. test/subdir')
def test_generated(s):
pat = Pattern('<~basename>')
s.assertEquals(pat.format(s.a), os.path.basename(s.a["~filename"]))
def test_generated_and_not_generated(s):
pat = Pattern('<~basename> <title>')
res = pat.format(s.a)
s.assertEquals(
res, os.path.basename(s.a["~filename"]) + " " + s.a["title"])
def test_number_dot_title_dot(s):
pat = Pattern('<tracknumber>. <title>.')
s.assertEquals(pat.format(s.a), '5/6. Title5.')
s.assertEquals(pat.format(s.b), '6. Title6.')
s.assertEquals(pat.format(s.c), '. test/subdir.')
def test_number_dot_genre(s):
pat = Pattern('<tracknumber>. <genre>')
s.assertEquals(pat.format(s.a), '5/6. ')
s.assertEquals(pat.format(s.b), '6. ')
s.assertEquals(pat.format(s.c), '. /, /')
def test_unicode_with_int(s):
song = AudioFile({"tracknumber": "5/6",
"title": b"\xe3\x81\x99\xe3\x81\xbf\xe3\x82\x8c".decode('utf-8')})
pat = Pattern('<~#track>. <title>')
s.assertEquals(pat.format(song),
b"5. \xe3\x81\x99\xe3\x81\xbf\xe3\x82\x8c".decode('utf-8'))
class _TFileFromPattern(_TPattern):
def _create(self, string):
return FileFromPattern(string)
def test_escape_slash(s):
fpat = s._create('<~filename>')
s.assertTrue(fpat.format(s.a).endswith("_path_to_a.mp3"))
pat = Pattern('<~filename>')
if os.name != "nt":
s.assertTrue(pat.format(s.a).startswith("/path/to/a"))
else:
s.assertTrue(pat.format(s.a).startswith("C:\\path\\to\\a"))
if os.name != "nt":
wpat = s._create(r'\\<artist>\\ "<title>')
s.assertTrue(
wpat.format(s.a).startswith(r'\Artist\ "Title5'))
else:
# FIXME..
pass
def test_directory_rooting(s):
if os.name == "nt":
s.assertRaises(ValueError, FileFromPattern, 'a\\<b>')
s.assertRaises(ValueError, FileFromPattern, '<a>\\<b>')
s._create('C:\\<a>\\<b>')
else:
s.assertRaises(ValueError, FileFromPattern, 'a/<b>')
s.assertRaises(ValueError, FileFromPattern, '<a>/<b>')
s._create('/<a>/<b>')
def test_backslash_conversion_win32(s):
if os.name == 'nt':
pat = s._create(r'Z:\<artist>\<title>')
s.assertTrue(pat.format(s.a).startswith(r'Z:\Artist\Title5'))
def test_raw_slash_preservation(s):
if os.name == "nt":
pat = s._create('C:\\a\\b\\<genre>')
s.assertTrue(pat.format(s.a).startswith('C:\\a\\b\\'))
s.assertTrue(pat.format(s.b).startswith('C:\\a\\b\\'))
s.assertTrue(pat.format(s.c).startswith('C:\\a\\b\\_, _'))
else:
pat = s._create('/a/b/<genre>')
s.assertTrue(pat.format(s.a).startswith('/a/b/'))
s.assertTrue(pat.format(s.b).startswith('/a/b/'))
s.assertTrue(pat.format(s.c).startswith('/a/b/_, _'))
def test_specialcase_anti_ext(s):
p1 = s._create('<~filename>')
p2 = s._create('<~dirname>_<~basename>')
s.assertEquals(p1.format(s.a), p2.format(s.a))
s.assertTrue(p1.format(s.a).endswith('_path_to_a.mp3'))
s.assertEquals(p1.format(s.b), p2.format(s.b))
s.assertTrue(p1.format(s.b).endswith('_path_to_b.ogg'))
s.assertEquals(p1.format(s.c), p2.format(s.c))
s.assertTrue(p1.format(s.c).endswith('_one_more_a.flac'))
def test_long_filename(s):
if os.name == "nt":
a = AudioFile({"title": "x" * 300, "~filename": u"C:\\f.mp3"})
path = s._create(u'C:\\foobar\\ä<title>\\<title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 3 + 6 + 1 + 255 + 1 + 255)
path = s._create(u'äüö<title><title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 255)
else:
a = AudioFile({"title": "x" * 300, "~filename": "/f.mp3"})
path = s._create(u'/foobar/ä<title>/<title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 1 + 6 + 1 + 255 + 1 + 255)
path = s._create(u'äüö<title><title>').format(a)
assert isinstance(path, fsnative)
s.failUnlessEqual(len(path), 255)
class TFileFromPattern(_TFileFromPattern):
def _create(self, string):
return FileFromPattern(string)
def test_type(self):
pat = self._create('')
self.assertTrue(isinstance(pat.format(self.a), fsnative))
pat = self._create('<title>')
self.assertTrue(isinstance(pat.format(self.a), fsnative))
def test_number_dot_title_dot(s):
pat = s._create('<tracknumber>. <title>.')
s.assertEquals(pat.format(s.a), '05. Title5..mp3')
s.assertEquals(pat.format(s.b), '06. Title6..ogg')
s.assertEquals(pat.format(s.c), '. test_subdir..flac')
def test_tracknumber_decimals(s):
pat = s._create('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '05. Title5.mp3')
s.assertEquals(pat.format(s.e), '0007. Title7.mp3')
def test_ext_case_preservation(s):
x = AudioFile({'~filename': fsnative(u'/tmp/Xx.Flac'), 'title': 'Xx'})
# If pattern has a particular ext, preserve case of ext
p1 = s._create('<~basename>')
s.assertEquals(p1.format(x), 'Xx.Flac')
p2 = s._create('<title>.FLAC')
s.assertEquals(p2.format(x), 'Xx.FLAC')
# If pattern doesn't have a particular ext, lowercase ext
p3 = s._create('<title>')
s.assertEquals(p3.format(x), 'Xx.flac')
class TArbitraryExtensionFileFromPattern(_TFileFromPattern):
def _create(self, string):
return ArbitraryExtensionFileFromPattern(string)
def test_number_dot_title_dot(s):
pat = s._create('<tracknumber>. <title>.')
if os.name == 'nt':
# Can't have Windows names ending with dot
s.assertEquals(pat.format(s.a), '05. Title5_')
s.assertEquals(pat.format(s.b), '06. Title6_')
s.assertEquals(pat.format(s.c), '. test_subdir_')
else:
s.assertEquals(pat.format(s.a), '05. Title5.')
s.assertEquals(pat.format(s.b), '06. Title6.')
s.assertEquals(pat.format(s.c), '. test_subdir.')
def test_tracknumber_decimals(s):
pat = s._create('<tracknumber>. <title>')
s.assertEquals(pat.format(s.a), '05. Title5')
s.assertEquals(pat.format(s.e), '0007. Title7')
def test_constant_albumart_example(s):
pat = s._create("folder.jpg")
s.assertEquals(pat.format(s.a), 'folder.jpg')
def test_extra_dots(s):
pat = s._create("<artist~album>.png")
s.assertEquals(pat.format(s.f), 'Foo - Best Of.png')
pat = s._create("<albumartist~title>.png")
s.assertEquals(pat.format(s.f), 'foo.bar - The.Final.Word.png')
class TXMLFromPattern(_TPattern):
def test_markup_passthrough(s):
pat = XMLFromPattern(r'\<b\><<title>>\</b\>')
s.assertEquals(pat.format(s.a), '<b><Title5></b>')
s.assertEquals(pat.format(s.b), '<b><Title6></b>')
s.assertEquals(pat.format(s.c), '<b><test/subdir></b>')
def test_escape(s):
pat = XMLFromPattern(r'\<b\><<xmltest>>\</b\>')
s.assertEquals(pat.format(s.a), '<b><<&>></b>')
def test_cond_markup(s):
pat = XMLFromPattern(r'<title|\<b\><title> woo\</b\>>')
s.assertEquals(pat.format(s.a), '<b>Title5 woo</b>')
class TXMLFromMarkupPattern(_TPattern):
def _test_markup(self, text):
from gi.repository import Pango
Pango.parse_markup(text, -1, "\x00")
def test_convenience(s):
pat = XMLFromMarkupPattern(r'[b]foo[/b]')
s.assertEquals(pat.format(s.a), '<b>foo</b>')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern('[small ]foo[/small \t]')
s.assertEquals(pat.format(s.a), '<small >foo</small \t>')
s._test_markup(pat.format(s.a))
def test_link(s):
pat = XMLFromMarkupPattern(r'[a href=""]foo[/a]')
s.assertEquals(pat.format(s.a), '<a href="">foo</a>')
def test_convenience_invalid(s):
pat = XMLFromMarkupPattern(r'[b foo="1"]')
s.assertEquals(pat.format(s.a), '[b foo="1"]')
s._test_markup(pat.format(s.a))
def test_span(s):
pat = XMLFromMarkupPattern(r'[span]foo[/span]')
s.assertEquals(pat.format(s.a), '<span>foo</span>')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern(r'[span weight="bold"]foo[/span]')
s.assertEquals(pat.format(s.a), '<span weight="bold">foo</span>')
s._test_markup(pat.format(s.a))
def test_escape(s):
pat = XMLFromMarkupPattern(r'\[b]')
s.assertEquals(pat.format(s.a), '[b]')
s._test_markup(pat.format(s.a))
pat = XMLFromMarkupPattern(r'\\\\[b]\\\\[/b]')
s.assertEquals(pat.format(s.a), r'\\<b>\\</b>')
s._test_markup(pat.format(s.a))
class TRealTags(TestCase):
def test_empty(self):
self.failUnlessEqual(Pattern("").tags, [])
def test_both(self):
pat = "<foo|<~bar~fuu> - <fa>|<bar>>"
self.failUnlessEqual(Pattern(pat).tags, ["bar", "fuu", "fa"])
pat = "<foo|<~bar~fuu> - <fa>|<quux>>"
self.failUnlessEqual(Pattern(pat).tags, ["bar", "fuu", "fa", "quux"])
class TPatternFormatList(_TPattern):
def test_numeric(self):
pat = Pattern("<~#rating>")
self.assertEqual(pat.format_list(self.a), {("0.50", "0.50")})
def test_empty(self):
pat = Pattern("<nopenope>")
self.assertEqual(pat.format_list(self.a), {("", "")})
def test_same(s):
pat = Pattern('<~basename> <title>')
s.failUnlessEqual(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
pat = Pattern('/a<genre|/<genre>>/<title>')
s.failUnlessEqual(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
def test_same2(s):
fpat = FileFromPattern('<~filename>')
pat = Pattern('<~filename>')
s.assertEquals(fpat.format_list(s.a),
{(fpat.format(s.a), fpat.format(s.a))})
s.assertEquals(pat.format_list(s.a),
{(pat.format(s.a), pat.format(s.a))})
def test_tied(s):
pat = Pattern('<genre>')
s.failUnlessEqual(pat.format_list(s.c), {('/', '/')})
pat = Pattern('<performer>')
s.failUnlessEqual(pat.format_list(s.d), {('a', 'a'), ('b', 'b')})
pat = Pattern('<performer><performer>')
s.failUnlessEqual(set(pat.format_list(s.d)),
{('aa', 'aa'), ('ab', 'ab'),
('ba', 'ba'), ('bb', 'bb')})
pat = Pattern('<~performer~artist>')
s.failUnlessEqual(pat.format_list(s.d),
{('a', 'a'), ('b', 'b'),
('bar', 'bar'), ('foo', 'foo')})
pat = Pattern('<performer~artist>')
s.failUnlessEqual(pat.format_list(s.d),
{('a', 'a'), ('b', 'b'),
('bar', 'bar'), ('foo', 'foo')})
pat = Pattern('<artist|<artist>.|<performer>>')
s.failUnlessEqual(pat.format_list(s.d),
{('foo.', 'foo.'), ('bar.', 'bar.')})
pat = Pattern('<artist|<artist|<artist>.|<performer>>>')
s.failUnlessEqual(pat.format_list(s.d),
{('foo.', 'foo.'), ('bar.', 'bar.')})
def test_sort(s):
pat = Pattern('<album>')
s.failUnlessEqual(pat.format_list(s.f),
{(u'Best Of', u'Best Of')})
pat = Pattern('<album>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Album5', u'SortAlbum5')})
pat = Pattern('<artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3')})
pat = Pattern('<artist> x')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1 x', u'SortA1 x'),
(u' x', u'SortA2 x'),
(u'Artist3 x', u'Artist3 x')})
def test_sort_tied(s):
pat = Pattern('<~artist~album>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3'),
(u'Album5', u'SortAlbum5')})
pat = Pattern('<~album~artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3'),
(u'Album5', u'SortAlbum5')})
pat = Pattern('<~artist~artist>')
s.failUnlessEqual(pat.format_list(s.h), {(u'Artist1', u'SortA1'),
(u'', u'SortA2'),
(u'Artist3', u'Artist3')})
def test_sort_combine(s):
pat = Pattern('<album> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Album5 Artist1', u'SortAlbum5 SortA1'),
(u'Album5 ', u'SortAlbum5 SortA2'),
(u'Album5 Artist3', u'SortAlbum5 Artist3')})
pat = Pattern('x <artist> <album>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'x Artist1 Album5', u'x SortA1 SortAlbum5'),
(u'x Album5', u'x SortA2 SortAlbum5'),
(u'x Artist3 Album5', u'x Artist3 SortAlbum5')})
pat = Pattern(' <artist> <album> xx')
s.failUnlessEqual(pat.format_list(s.h),
{(u' Artist1 Album5 xx', u' SortA1 SortAlbum5 xx'),
(u' Album5 xx', u' SortA2 SortAlbum5 xx'),
(u' Artist3 Album5 xx', u' Artist3 SortAlbum5 xx')})
pat = Pattern('<album> <tracknumber> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Album5 7/8 Artist1', u'SortAlbum5 7/8 SortA1'),
(u'Album5 7/8 ', u'SortAlbum5 7/8 SortA2'),
(u'Album5 7/8 Artist3', u'SortAlbum5 7/8 Artist3')})
pat = Pattern('<tracknumber> <album> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'7/8 Album5 Artist1', u'7/8 SortAlbum5 SortA1'),
(u'7/8 Album5 ', u'7/8 SortAlbum5 SortA2'),
(u'7/8 Album5 Artist3', u'7/8 SortAlbum5 Artist3')})
def test_sort_multiply(s):
pat = Pattern('<artist> <artist>')
s.failUnlessEqual(pat.format_list(s.h),
{(u'Artist1 Artist1', u'SortA1 SortA1'),
(u' Artist1', u'SortA2 SortA1'),
(u'Artist3 Artist1', u'Artist3 SortA1'),
(u'Artist1 ', u'SortA1 SortA2'),
(u' ', u'SortA2 SortA2'),
(u'Artist3 ', u'Artist3 SortA2'),
(u'Artist1 Artist3', u'SortA1 Artist3'),
(u' Artist3', u'SortA2 Artist3'),
(u'Artist3 Artist3', u'Artist3 Artist3')})
def test_missing_value(self):
pat = Pattern('<genre> - <artist>')
self.assertEqual(pat.format_list(self.a),
{(" - Artist", " - Artist")})
pat = Pattern('')
self.assertEqual(pat.format_list(self.a), {("", "")})
def test_string(s):
pat = Pattern('display')
s.assertEqual(pat.format_list(s.a), {("display", "display")})
| gpl-2.0 |
c0hen/django-venv | lib/python3.4/site-packages/psycopg2/pool.py | 3 | 8136 | """Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
New 'minconn' connections are created immediately calling 'connfunc'
with given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
        Note that this can lead to some code failing badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module, to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
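# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of the ThreadedConnectionPool API defined above; the
# connection parameters are placeholders, not working credentials.
if __name__ == '__main__':
    pool = ThreadedConnectionPool(1, 5, dbname='test', user='postgres')
    conn = pool.getconn()
    try:
        cur = conn.cursor()
        cur.execute("SELECT 1")
        print(cur.fetchone())
    finally:
        pool.putconn(conn)
        pool.closeall()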
| gpl-3.0 |
Darthkpo/xtt | openpyxl/tests/test_backend.py | 4 | 2038 | #
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Make sure we're using the fastest backend available"""
from openpyxl import LXML
try:
from xml.etree.cElementTree import Element as cElement
C = True
except ImportError:
C = False
try:
from lxml.etree import Element as lElement
except ImportError:
    lElement = None
from xml.etree.ElementTree import Element as pyElement
def test_backend():
from openpyxl.xml.functions import Element
if LXML is True:
assert Element == lElement
elif C is True:
assert Element == cElement
else:
assert Element == pyElement
def test_namespace_register():
from openpyxl.xml.functions import Element, tostring
from openpyxl.xml.constants import SHEET_MAIN_NS
e = Element('{%s}sheet' % SHEET_MAIN_NS)
xml = tostring(e)
if hasattr(xml, "decode"):
xml = xml.decode("utf-8")
assert xml.startswith("<s:sheet")
| mit |
InterfaceMasters/ONL | components/all/vendor-config/qemu/src/python/qemu/__init__.py | 9 | 1313 | #!/usr/bin/python
############################################################
# <bsn.cl fy=2013 v=onl>
#
# Copyright 2013, 2014 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
# </bsn.cl>
############################################################
#
# OpenNetworkPlatform support for QEMU Platforms.
#
############################################################
from onl.platform.base import OpenNetworkPlatformBase, sysinfo
import struct
import time
class OpenNetworkPlatformQEMU(OpenNetworkPlatformBase):
def manufacturer(self):
return "QEMU"
def _sys_info_dict(self):
return {
sysinfo.MAGIC : 0,
sysinfo.PRODUCT_NAME : "QEMU Emulation",
sysinfo.PART_NUMBER : "QEMU"
}
| epl-1.0 |
shipci/boto | tests/unit/vpc/test_customergateway.py | 114 | 4610 | from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, CustomerGateway
class TestDescribeCustomerGateways(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGatewaySet>
<item>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>available</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</item>
</customerGatewaySet>
</DescribeCustomerGatewaysResponse>
"""
def test_get_all_customer_gateways(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_customer_gateways(
'cgw-b4dc3961',
filters=OrderedDict([('state', ['pending', 'available']),
('ip-address', '12.1.2.3')]))
self.assert_request_parameters({
'Action': 'DescribeCustomerGateways',
'CustomerGatewayId.1': 'cgw-b4dc3961',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'pending',
'Filter.1.Value.2': 'available',
'Filter.2.Name': 'ip-address',
'Filter.2.Value.1': '12.1.2.3'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 1)
self.assertIsInstance(api_response[0], CustomerGateway)
self.assertEqual(api_response[0].id, 'cgw-b4dc3961')
class TestCreateCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGateway>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>pending</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</customerGateway>
</CreateCustomerGatewayResponse>
"""
def test_create_customer_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_customer_gateway(
'ipsec.1', '12.1.2.3', 65534)
self.assert_request_parameters({
'Action': 'CreateCustomerGateway',
'Type': 'ipsec.1',
'IpAddress': '12.1.2.3',
'BgpAsn': 65534},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, CustomerGateway)
self.assertEquals(api_response.id, 'cgw-b4dc3961')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.type, 'ipsec.1')
self.assertEquals(api_response.ip_address, '12.1.2.3')
self.assertEquals(api_response.bgp_asn, 65534)
class TestDeleteCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteCustomerGatewayResponse>
"""
def test_delete_customer_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961')
self.assert_request_parameters({
'Action': 'DeleteCustomerGateway',
'CustomerGatewayId': 'cgw-b4dc3961'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| mit |
hifly/OpenUpgrade | addons/product_extended/__init__.py | 374 | 1068 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_extended
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vim-IDE/python-mode | pymode/libs/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
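# --- Illustrative usage sketch (not part of the original module) ---
# Evaluating PEP 345 marker strings with interpret(); the markers below are
# arbitrary examples, not taken from any particular package.
if __name__ == '__main__':
    print(interpret("python_version >= '2.6' and os.name != 'java'"))
    print(interpret("sys.platform == 'win32'"))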
| lgpl-3.0 |
jayceyxc/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/dump_worksheet.py | 61 | 8158 | # file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from ..cell import column_index_from_string, get_column_letter, Cell
from ..worksheet import Worksheet
from ..shared.xmltools import XMLGenerator, get_document_content, \
start_tag, end_tag, tag
from ..shared.date_time import SharedDate
from ..shared.ooxml import MAX_COLUMN, MAX_ROW
from tempfile import NamedTemporaryFile
from ..writer.excel import ExcelWriter
from ..writer.strings import write_string_table
from ..writer.styles import StyleWriter
from ..style import Style, NumberFormat
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
'style':'1'},
'string':{'type':Cell.TYPE_STRING,
'style':'0'},
'numeric':{'type':Cell.TYPE_NUMERIC,
'style':'0'},
'formula':{'type':Cell.TYPE_FORMULA,
'style':'0'},
'boolean':{'type':Cell.TYPE_BOOL,
'style':'0'},
}
DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
class DumpWorksheet(Worksheet):
"""
.. warning::
You shouldn't initialize this yourself, use :class:`openpyxl.workbook.Workbook` constructor instead,
with `optimized_write = True`.
"""
def __init__(self, parent_workbook):
Worksheet.__init__(self, parent_workbook)
self._max_col = 0
self._max_row = 0
self._parent = parent_workbook
self._fileobj_header = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.header', delete=False)
self._fileobj_content = NamedTemporaryFile(mode='r+', prefix='openpyxl.', suffix='.content', delete=False)
self._fileobj = NamedTemporaryFile(mode='w', prefix='openpyxl.', delete=False)
self.doc = XMLGenerator(self._fileobj_content, 'utf-8')
self.header = XMLGenerator(self._fileobj_header, 'utf-8')
self.title = 'Sheet'
self._shared_date = SharedDate()
self._string_builder = self._parent.strings_table_builder
@property
def filename(self):
return self._fileobj.name
def write_header(self):
doc = self.header
start_tag(doc, 'worksheet',
{'xml:space': 'preserve',
'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
start_tag(doc, 'sheetPr')
tag(doc, 'outlinePr',
{'summaryBelow': '1',
'summaryRight': '1'})
end_tag(doc, 'sheetPr')
tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
start_tag(doc, 'sheetViews')
start_tag(doc, 'sheetView', {'workbookViewId': '0'})
tag(doc, 'selection', {'activeCell': 'A1',
'sqref': 'A1'})
end_tag(doc, 'sheetView')
end_tag(doc, 'sheetViews')
tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
start_tag(doc, 'sheetData')
def close(self):
self._close_content()
self._close_header()
self._write_fileobj(self._fileobj_header)
self._write_fileobj(self._fileobj_content)
self._fileobj.close()
def _write_fileobj(self, fobj):
fobj.flush()
fobj.seek(0)
while True:
chunk = fobj.read(4096)
if not chunk:
break
self._fileobj.write(chunk)
fobj.close()
os.remove(fobj.name)
self._fileobj.flush()
def _close_header(self):
doc = self.header
#doc.endDocument()
def _close_content(self):
doc = self.doc
end_tag(doc, 'sheetData')
end_tag(doc, 'worksheet')
#doc.endDocument()
def get_dimensions(self):
if not self._max_col or not self._max_row:
return 'A1'
else:
return '%s%d' % (get_column_letter(self._max_col), (self._max_row))
def append(self, row):
"""
:param row: iterable containing values to append
:type row: iterable
"""
doc = self.doc
self._max_row += 1
span = len(row)
self._max_col = max(self._max_col, span)
row_idx = self._max_row
attrs = {'r': '%d' % row_idx,
'spans': '1:%d' % span}
start_tag(doc, 'row', attrs)
for col_idx, cell in enumerate(row):
if cell is None:
continue
coordinate = '%s%d' % (get_column_letter(col_idx+1), row_idx)
attributes = {'r': coordinate}
if isinstance(cell, bool):
dtype = 'boolean'
elif isinstance(cell, (int, float)):
dtype = 'numeric'
elif isinstance(cell, (datetime.datetime, datetime.date)):
dtype = 'datetime'
cell = self._shared_date.datetime_to_julian(cell)
attributes['s'] = STYLES[dtype]['style']
elif cell and cell[0] == '=':
dtype = 'formula'
else:
dtype = 'string'
cell = self._string_builder.add(cell)
attributes['t'] = STYLES[dtype]['type']
start_tag(doc, 'c', attributes)
if dtype == 'formula':
tag(doc, 'f', body = '%s' % cell[1:])
tag(doc, 'v')
else:
tag(doc, 'v', body = '%s' % cell)
end_tag(doc, 'c')
end_tag(doc, 'row')
def save_dump(workbook, filename):
writer = ExcelDumpWriter(workbook)
writer.save(filename)
return True
class ExcelDumpWriter(ExcelWriter):
def __init__(self, workbook):
self.workbook = workbook
self.style_writer = StyleDumpWriter(workbook)
self.style_writer._style_list.append(DATETIME_STYLE)
def _write_string_table(self, archive):
shared_string_table = self.workbook.strings_table_builder.get_table()
archive.writestr(ARC_SHARED_STRINGS,
write_string_table(shared_string_table))
return shared_string_table
def _write_worksheets(self, archive, shared_string_table, style_writer):
for i, sheet in enumerate(self.workbook.worksheets):
sheet.write_header()
sheet.close()
archive.write(sheet.filename, PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1))
os.remove(sheet.filename)
class StyleDumpWriter(StyleWriter):
def _get_style_list(self, workbook):
return []
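# --- Illustrative usage sketch (not part of the original module) ---
# Rough outline of the optimized-write path implemented above.  It assumes the
# bundled Workbook accepts optimized_write=True (as the DumpWorksheet docstring
# describes); the output path is a placeholder.
def _example_dump(filename='/tmp/dump.xlsx'):
    from ..workbook import Workbook
    wb = Workbook(optimized_write=True)
    ws = wb.create_sheet()
    for row_idx in range(100):
        ws.append([row_idx, 'text', 3.14])
    save_dump(wb, filename)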
| apache-2.0 |
ApolloAuto/apollo | third_party/gpus/check_cuda_libs.py | 3 | 2904 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verifies that a list of libraries is installed on the system.
Takes a a list of arguments with every two subsequent arguments being a logical
tuple of (path, check_soname). The path to the library and either True or False
to indicate whether to check the soname field on the shared library.
Example Usage:
./check_cuda_libs.py /path/to/lib1.so True /path/to/lib2.so False
"""
import os
import os.path
import platform
import subprocess
import sys
# pylint: disable=g-import-not-at-top,g-importing-member
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top,g-importing-member
class ConfigError(Exception):
pass
def check_cuda_lib(path, check_soname=True):
"""Tests if a library exists on disk and whether its soname matches the filename.
Args:
path: the path to the library.
check_soname: whether to check the soname as well.
Raises:
ConfigError: If the library does not exist or if its soname does not match
the filename.
"""
if not os.path.isfile(path):
raise ConfigError("No library found under: " + path)
objdump = which("objdump")
if check_soname and objdump is not None:
# Decode is necessary as in py3 the return type changed from str to bytes
output = subprocess.check_output([objdump, "-p", path]).decode("utf-8")
output = [line for line in output.splitlines() if "SONAME" in line]
sonames = [line.strip().split(" ")[-1] for line in output]
if not any(soname == os.path.basename(path) for soname in sonames):
raise ConfigError("None of the libraries match their SONAME: " + path)
def main():
try:
args = [argv for argv in sys.argv[1:]]
if len(args) % 2 == 1:
raise ConfigError("Expected even number of arguments")
checked_paths = []
for i in range(0, len(args), 2):
path = args[i]
check_cuda_lib(path, check_soname=args[i + 1] == "True")
checked_paths.append(path)
# pylint: disable=superfluous-parens
print(os.linesep.join(checked_paths))
# pylint: enable=superfluous-parens
except ConfigError as e:
sys.stderr.write(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 |
slava-sh/NewsBlur | vendor/readability/cleaners.py | 13 | 1199 | # strip out a set of nuisance html attributes that can mess up rendering in RSS feeds
import re
from lxml.html.clean import Cleaner
bad_attrs = ['style', '[-a-z]*color', 'background[-a-z]*', 'on*']
single_quoted = "'[^']+'"
double_quoted = '"[^"]+"'
non_space = '[^ "\'>]+'
htmlstrip = re.compile("<" # open
"([^>]+) " # prefix
"(?:%s) *" % ('|'.join(bad_attrs),) + # undesirable attributes
'= *(?:%s|%s|%s)' % (non_space, single_quoted, double_quoted) + # value
"([^>]*)" # postfix
">" # end
, re.I)
def clean_attributes(html):
while htmlstrip.search(html):
html = htmlstrip.sub('<\\1\\2>', html)
return html
def normalize_spaces(s):
    """Replace any sequence of whitespace
    characters with a single space."""
    if not s: return ''
return ' '.join(s.split())
html_cleaner = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True, embedded=False,
frames=False, forms=False, annoying_tags=False, remove_tags=None,
remove_unknown_tags=False, safe_attrs_only=False)
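# --- Illustrative usage sketch (not part of the original module) ---
# Small demo of the helpers above on a hand-written snippet; the HTML strings
# are arbitrary examples.
if __name__ == '__main__':
    sample = '<p style="color: red">Some   styled   text</p>'
    print(clean_attributes(sample))                  # -> '<p>Some   styled   text</p>'
    print(normalize_spaces('Some   styled   text'))  # -> 'Some styled text'
    print(html_cleaner.clean_html('<p>hi<script>evil()</script></p>'))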
| mit |
polyaxon/polyaxon | core/polyaxon/polypod/common/container_resources.py | 1 | 1492 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Union
from polyaxon.k8s import k8s_schemas
def sanitize_resources(
resources: Union[k8s_schemas.V1ResourceRequirements, Dict]
) -> Optional[k8s_schemas.V1ResourceRequirements]:
def validate_resources(r_field: Dict) -> Dict:
if not r_field:
return r_field
for k in r_field:
r_field[k] = str(r_field[k])
return r_field
if not resources:
return None
if isinstance(resources, Dict):
return k8s_schemas.V1ResourceRequirements(
limits=validate_resources(resources.get("limits", None)),
requests=validate_resources(resources.get("requests", None)),
)
else:
return k8s_schemas.V1ResourceRequirements(
limits=validate_resources(resources.limits),
requests=validate_resources(resources.requests),
)
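# --- Usage sketch (illustrative, not part of the upstream module) ---
# Resource values are coerced to strings, the form the Kubernetes API expects
# for quantities; the numbers below are assumptions for demonstration only.
if __name__ == "__main__":
    sanitized = sanitize_resources(
        {"requests": {"cpu": 0.5, "memory": "256Mi"}, "limits": {"nvidia.com/gpu": 1}}
    )
    print(sanitized.requests)  # -> {'cpu': '0.5', 'memory': '256Mi'}
    print(sanitized.limits)  # -> {'nvidia.com/gpu': '1'}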
| apache-2.0 |
leapcode/bitmask-dev | tests/integration/mail/outgoing/test_outgoing.py | 1 | 9783 | # -*- coding: utf-8 -*-
# test_gateway.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
SMTP gateway tests.
"""
import re
from copy import deepcopy
from StringIO import StringIO
from email.parser import Parser
from datetime import datetime
from twisted.internet.defer import fail
from twisted.mail.smtp import User
from twisted.python import log
from mock import Mock
from leap.bitmask.mail.rfc3156 import RFC3156CompliantGenerator
from leap.bitmask.mail.outgoing.service import OutgoingMail
from leap.bitmask.mail.outgoing.sender import SMTPSender
from leap.bitmask.mail.testing import ADDRESS, ADDRESS_2, PUBLIC_KEY_2
from leap.bitmask.mail.testing import KeyManagerWithSoledadTestCase
from leap.bitmask.mail.testing.smtp import getSMTPFactory
from leap.bitmask.keymanager import errors
BEGIN_PUBLIC_KEY = "-----BEGIN PGP PUBLIC KEY BLOCK-----"
TEST_USER = u'[email protected]'
class TestOutgoingMail(KeyManagerWithSoledadTestCase):
EMAIL_DATA = ['HELO gateway.leap.se',
'MAIL FROM: <%s>' % ADDRESS_2,
'RCPT TO: <%s>' % ADDRESS,
'DATA',
'From: User <%s>' % ADDRESS_2,
'To: Leap <%s>' % ADDRESS,
'Date: ' + datetime.now().strftime('%c'),
'Subject: test message',
'',
'This is a secret message.',
'Yours,',
'A.',
'',
'.',
'QUIT']
def setUp(self):
self.lines = [line for line in self.EMAIL_DATA[4:12]]
self.lines.append('') # add a trailing newline
self.raw = '\r\n'.join(self.lines)
self.expected_body = '\r\n'.join(self.EMAIL_DATA[9:12]) + "\r\n"
self.fromAddr = ADDRESS_2
class opts:
cert = u'/tmp/cert'
key = u'/tmp/cert'
hostname = 'remote'
port = 666
self.opts = opts
def init_outgoing_and_proto(_):
self.outgoing = OutgoingMail(self.fromAddr, self.km)
self.outgoing.add_sender(
SMTPSender(self.fromAddr, opts.key, opts.hostname, opts.port))
user = TEST_USER
# TODO -- this shouldn't need SMTP to be tested!? or does it?
self.proto = getSMTPFactory(
{user: None}, {user: self.km}, {user: None})
self.dest = User(ADDRESS, 'gateway.leap.se', self.proto, ADDRESS_2)
d = KeyManagerWithSoledadTestCase.setUp(self)
d.addCallback(init_outgoing_and_proto)
return d
def test_message_encrypt(self):
"""
Test if message gets encrypted to destination email.
"""
def check_decryption(res):
decrypted, _ = res
self.assertIn(
self.expected_body,
decrypted,
'Decrypted text does not contain the original text.')
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS))
d.addCallback(check_decryption)
return d
def test_message_encrypt_sign(self):
"""
Test if message gets encrypted to destination email and signed with
sender key.
        """
def check_decryption_and_verify(res):
decrypted, signkey = res
self.assertIn(
self.expected_body,
decrypted,
'Decrypted text does not contain the original text.')
self.assertTrue(ADDRESS_2 in signkey.address,
"Verification failed")
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS, verify=ADDRESS_2))
d.addCallback(check_decryption_and_verify)
return d
def test_message_sign(self):
"""
Test if message is signed with sender key.
"""
# mock the key fetching
# XXX this is fucking ugly.
self.km._fetch_keys_from_server_and_store_local = Mock(
return_value=fail(errors.KeyNotFound()))
recipient = User('[email protected]',
'gateway.leap.se', self.proto, ADDRESS)
self.outgoing = OutgoingMail(self.fromAddr, self.km)
def check_signed(res):
message, _ = res
self.assertTrue('Content-Type' in message)
self.assertEqual('multipart/signed', message.get_content_type())
self.assertEqual('application/pgp-signature',
message.get_param('protocol'))
self.assertEqual('pgp-sha512', message.get_param('micalg'))
# assert content of message
body = (message.get_payload(0)
.get_payload(0)
.get_payload(decode=True))
self.assertEqual(self.expected_body,
body)
# assert content of signature
self.assertTrue(
message.get_payload(1).get_payload().startswith(
'-----BEGIN PGP SIGNATURE-----\n'),
'Message does not start with signature header.')
self.assertTrue(
message.get_payload(1).get_payload().endswith(
'-----END PGP SIGNATURE-----\n'),
'Message does not end with signature footer.')
return message
def verify(message):
# replace EOL before verifying (according to rfc3156)
fp = StringIO()
g = RFC3156CompliantGenerator(
fp, mangle_from_=False, maxheaderlen=76)
g.flatten(message.get_payload(0))
signed_text = re.sub('\r?\n', '\r\n',
fp.getvalue())
def assert_verify(key):
self.assertTrue(ADDRESS_2 in key.address,
'Signature could not be verified.')
d = self.km.verify(
signed_text, ADDRESS_2,
detached_sig=message.get_payload(1).get_payload())
d.addCallback(assert_verify)
return d
# TODO shouldn't depend on private method on this test
d = self.outgoing._maybe_encrypt_and_sign(self.raw, recipient)
d.addCallback(check_signed)
d.addCallback(verify)
return d
def test_attach_key(self):
d = self.outgoing._maybe_encrypt_and_sign(self.raw, self.dest)
d.addCallback(self._assert_encrypted)
d.addCallback(self._check_headers, self.lines[:4])
d.addCallback(lambda message: self.km.decrypt(
message.get_payload(1).get_payload(), ADDRESS))
d.addCallback(lambda (decrypted, _):
self._check_key_attachment(Parser().parsestr(decrypted)))
return d
def test_attach_key_not_known(self):
unknown_address = "[email protected]"
lines = deepcopy(self.lines)
lines[1] = "To: <%s>" % (unknown_address,)
raw = '\r\n'.join(lines)
dest = User(unknown_address, 'gateway.leap.se', self.proto, ADDRESS_2)
d = self.outgoing._maybe_encrypt_and_sign(
raw, dest, fetch_remote=False)
d.addCallback(lambda (message, _):
self._check_headers(message, lines[:4]))
d.addCallback(self._check_key_attachment)
d.addErrback(log.err)
return d
def _check_headers(self, message, headers):
msgstr = message.as_string(unixfrom=False)
for header in headers:
self.assertTrue(header in msgstr,
"Missing header: %s" % (header,))
return message
def _check_key_attachment(self, message):
for payload in message.get_payload():
if payload.is_multipart():
return self._check_key_attachment(payload)
if 'application/pgp-keys' == payload.get_content_type():
keylines = PUBLIC_KEY_2.split('\n')
key = BEGIN_PUBLIC_KEY + '\n\n' + '\n'.join(keylines[4:-1])
                self.assertTrue(key in payload.get_payload(decode=True),
                                "Key attachment doesn't match")
return
self.fail("No public key attachment found")
def _assert_encrypted(self, res):
message, _ = res
self.assertTrue('Content-Type' in message)
self.assertEqual('multipart/encrypted', message.get_content_type())
self.assertEqual('application/pgp-encrypted',
message.get_param('protocol'))
self.assertEqual(2, len(message.get_payload()))
self.assertEqual('application/pgp-encrypted',
message.get_payload(0).get_content_type())
self.assertEqual('application/octet-stream',
message.get_payload(1).get_content_type())
return message
| gpl-3.0 |
rvraghav93/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_false(hasattr(t2, "idf_"))
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset several parameter combinations converge to 100%
    # accuracy models, and the grid search reports the unigram configuration
    # as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset several parameter combinations converge to 100%
    # accuracy models, and the grid search reports the unigram configuration
    # as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
xgds/xgds_core | setup.py | 1 | 1649 | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
# Use the docstring of the __init__ file as the description
DESC = " ".join(__import__('xgds_core').__doc__.splitlines()).strip()
setup(
name="xgds_core",
version=__import__('xgds_core').get_version().replace(' ', '-'),
url='',
author='tecohen',
author_email='',
description=DESC,
long_description=read_file('README'),
packages=find_packages(),
include_package_data=True,
install_requires=read_file('requirements.txt'),
classifiers=[
'License :: OSI Approved :: NASA Open Source Agreement',
'Framework :: Django',
],
)
| apache-2.0 |
40423126/2016fallcadp_ag9 | plugin/liquid_tags/mdx_liquid_tags.py | 281 | 3447 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks',
'FLICKR_API_KEY': 'flickr',
'GIPHY_API_KEY': 'giphy'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin',
'FLICKR_API_KEY': 'Flickr key for accessing the API',
'GIPHY_API_KEY': 'Giphy key for accessing the API'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
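# --- Usage sketch (illustrative, not part of the upstream plugin) ---
# Registers a hypothetical "literal" tag and feeds a single line through the
# (private) preprocessor directly, just to show the tag-dispatch mechanics;
# in a real Pelican/Markdown setup the extension is loaded via makeExtension.
if __name__ == '__main__':
    @LiquidTags.register('literal')
    def literal(preprocessor, tag, markup):
        # markup is everything between the tag name and the closing "%}"
        return '<code>%s</code>' % markup
    processed = _LiquidTagsPreprocessor(configs=None).run(['{% literal x %}'])
    print('\n'.join(processed))  # -> <code>x</code>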
| agpl-3.0 |
f-guichard/cf-sample-php-buildpack-custo | extensions/composer/extension.py | 6 | 16424 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension
Downloads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
from build_pack_utils import utils
from build_pack_utils import stream_output
from extension_helpers import ExtensionHelper
_log = logging.getLogger('composer')
def find_composer_paths(ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
json_path = None
lock_path = None
json_paths = [
os.path.join(build_dir, 'composer.json'),
os.path.join(build_dir, webdir, 'composer.json')
]
lock_paths = [
os.path.join(build_dir, 'composer.lock'),
os.path.join(build_dir, webdir, 'composer.lock')
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
json_paths = json_paths + [
os.path.join(build_dir, env_path, 'composer.json'),
os.path.join(build_dir, webdir, env_path, 'composer.json')
]
lock_paths = lock_paths + [
os.path.join(build_dir, env_path, 'composer.lock'),
os.path.join(build_dir, webdir, env_path, 'composer.lock')
]
for path in json_paths:
if os.path.exists(path):
json_path = path
for path in lock_paths:
if os.path.exists(path):
lock_path = path
return (json_path, lock_path)
class ComposerConfiguration(object):
def __init__(self, ctx):
self._ctx = ctx
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
(self.json_path, self.lock_path) = \
find_composer_paths(self._ctx)
def read_exts_from_path(self, path):
exts = []
if path:
req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
ext_pat = re.compile(r'"ext-(.*?)"')
with open(path, 'rt') as fp:
data = fp.read()
for req_match in req_pat.finditer(data):
for ext_match in ext_pat.finditer(req_match.group(1)):
exts.append(ext_match.group(1))
return exts
def pick_php_version(self, requested):
selected = None
if requested is None:
selected = self._ctx['PHP_VERSION']
elif requested == '5.5.*' or requested == '>=5.5':
selected = self._ctx['PHP_55_LATEST']
elif requested == '5.6.*' or requested == '>=5.6':
selected = self._ctx['PHP_56_LATEST']
elif requested.startswith('5.5.'):
selected = requested
elif requested.startswith('5.6.'):
selected = requested
else:
selected = self._ctx['PHP_VERSION']
return selected
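    # Illustrative examples of the mapping above (the PHP_* values come from
    # the buildpack context; the concrete requested versions are assumptions):
    #   pick_php_version(None)     -> self._ctx['PHP_VERSION']
    #   pick_php_version('5.5.*')  -> self._ctx['PHP_55_LATEST']
    #   pick_php_version('>=5.6')  -> self._ctx['PHP_56_LATEST']
    #   pick_php_version('5.6.9')  -> '5.6.9'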
def read_version_from_composer(self, key):
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None:
composer = json.load(open(json_path, 'r'))
require = composer.get('require', {})
return require.get(key, None)
if lock_path is not None:
composer = json.load(open(lock_path, 'r'))
platform = composer.get('platform', {})
return platform.get(key, None)
return None
def configure(self):
if self.json_path or self.lock_path:
exts = []
# include any existing extensions
exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
# add 'openssl' extension
exts.append('openssl')
# add platform extensions from composer.json & composer.lock
exts.extend(self.read_exts_from_path(self.json_path))
exts.extend(self.read_exts_from_path(self.lock_path))
hhvm_version = self.read_version_from_composer('hhvm')
if hhvm_version:
self._ctx['PHP_VM'] = 'hhvm'
self._log.debug('Composer picked HHVM Version [%s]',
hhvm_version)
else:
# update context with new list of extensions,
# if composer.json exists
php_version = self.read_version_from_composer('php')
self._log.debug('Composer picked PHP Version [%s]',
php_version)
self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
self._ctx['PHP_VM'] = 'php'
class ComposerExtension(ExtensionHelper):
def __init__(self, ctx):
ExtensionHelper.__init__(self, ctx)
self._log = _log
def _defaults(self):
return {
'COMPOSER_VERSION': '1.0.0-alpha10',
'COMPOSER_PACKAGE': 'composer.phar',
'COMPOSER_DOWNLOAD_URL': '/composer/'
'{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
'COMPOSER_CACHE_DIR': '{CACHE_DIR}/composer'
}
def _should_compile(self):
(json_path, lock_path) = \
find_composer_paths(self._ctx)
return (json_path is not None or lock_path is not None)
def _compile(self, install):
self._builder = install.builder
self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
self.move_local_vendor_folder()
self.install()
self.run()
def move_local_vendor_folder(self):
vendor_path = os.path.join(self._ctx['BUILD_DIR'],
self._ctx['WEBDIR'],
'vendor')
if os.path.exists(vendor_path):
self._log.debug("Vendor [%s] exists, moving to LIBDIR",
vendor_path)
(self._builder.move()
.under('{BUILD_DIR}/{WEBDIR}')
.into('{BUILD_DIR}/{LIBDIR}')
.where_name_matches('^%s/.*$' % vendor_path)
.done())
def install(self):
self._builder.install().package('PHP').done()
if self._ctx['COMPOSER_VERSION'] == 'latest':
dependencies_path = os.path.join(self._ctx['BP_DIR'],
'dependencies')
if os.path.exists(dependencies_path):
raise RuntimeError('"COMPOSER_VERSION": "latest" ' \
'is not supported in the cached buildpack. Please vendor your preferred version of composer with your app, or use the provided default composer version.')
self._ctx['COMPOSER_DOWNLOAD_URL'] = \
'https://getcomposer.org/composer.phar'
self._builder.install()._installer.install_binary_direct(
self._ctx['COMPOSER_DOWNLOAD_URL'], None,
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
else:
self._builder.install()._installer._install_binary_from_manifest(
self._ctx['COMPOSER_DOWNLOAD_URL'],
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
def _github_oauth_token_is_valid(self, candidate_oauth_token):
stringio_writer = StringIO.StringIO()
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
return 'resources' in github_response_json
def _github_rate_exceeded(self, token_is_valid):
stringio_writer = StringIO.StringIO()
if token_is_valid:
candidate_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
else:
curl_command = 'curl https://api.github.com/rate_limit'
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
rate = github_response_json['rate']
num_remaining = rate['remaining']
return num_remaining <= 0
def setup_composer_github_token(self):
github_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
if self._github_oauth_token_is_valid(github_oauth_token):
print('-----> Using custom GitHub OAuth token in'
' $COMPOSER_GITHUB_OAUTH_TOKEN')
self.composer_runner.run('config', '-g',
'github-oauth.github.com',
'"%s"' % github_oauth_token)
return True
else:
print('-----> The GitHub OAuth token supplied from '
'$COMPOSER_GITHUB_OAUTH_TOKEN is invalid')
return False
def check_github_rate_exceeded(self, token_is_valid):
if self._github_rate_exceeded(token_is_valid):
print('-----> The GitHub api rate limit has been exceeded. '
'Composer will continue by downloading from source, which might result in slower downloads. '
'You can increase your rate limit with a GitHub OAuth token. '
'Please obtain a GitHub OAuth token by registering your application at '
'https://github.com/settings/applications/new. '
'Then set COMPOSER_GITHUB_OAUTH_TOKEN in your environment to the value of this token.')
def run(self):
# Move composer files into root directory
(json_path, lock_path) = find_composer_paths(self._ctx)
if json_path is not None and os.path.dirname(json_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(json_path))
.where_name_is('composer.json')
.into('BUILD_DIR')
.done())
if lock_path is not None and os.path.dirname(lock_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(lock_path))
.where_name_is('composer.lock')
.into('BUILD_DIR')
.done())
# Sanity Checks
if not os.path.exists(os.path.join(self._ctx['BUILD_DIR'],
'composer.lock')):
msg = (
'PROTIP: Include a `composer.lock` file with your '
'application! This will make sure the exact same version '
'of dependencies are used when you deploy to CloudFoundry.')
self._log.warning(msg)
print msg
# dump composer version, if in debug mode
if self._ctx.get('BP_DEBUG', False):
self.composer_runner.run('-V')
if not os.path.exists(os.path.join(self._ctx['BP_DIR'], 'dependencies')):
token_is_valid = False
# config composer to use github token, if provided
if os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN', False):
token_is_valid = self.setup_composer_github_token()
# check that the api rate limit has not been exceeded, otherwise exit
self.check_github_rate_exceeded(token_is_valid)
# install dependencies w/Composer
self.composer_runner.run('install', '--no-progress',
*self._ctx['COMPOSER_INSTALL_OPTIONS'])
class ComposerCommandRunner(object):
def __init__(self, ctx, builder):
self._log = _log
self._ctx = ctx
self._strategy = HHVMComposerStrategy(ctx) \
if ctx['PHP_VM'] == 'hhvm' else PHPComposerStrategy(ctx)
self._php_path = self._strategy.binary_path()
self._composer_path = os.path.join(ctx['BUILD_DIR'], 'php',
'bin', 'composer.phar')
self._strategy.write_config(builder)
def _build_composer_environment(self):
env = {}
for key in os.environ.keys():
val = self._ctx.get(key, '')
env[key] = val if type(val) == str else json.dumps(val)
# add basic composer vars
env['COMPOSER_VENDOR_DIR'] = self._ctx['COMPOSER_VENDOR_DIR']
env['COMPOSER_BIN_DIR'] = self._ctx['COMPOSER_BIN_DIR']
env['COMPOSER_CACHE_DIR'] = self._ctx['COMPOSER_CACHE_DIR']
# prevent key system variables from being overridden
env['LD_LIBRARY_PATH'] = self._strategy.ld_library_path()
env['PHPRC'] = self._ctx['TMPDIR']
env['PATH'] = ':'.join(filter(None,
[env.get('PATH', ''),
os.path.dirname(self._php_path)]))
self._log.debug("ENV IS: %s",
'\n'.join(["%s=%s (%s)" % (key, val, type(val))
for (key, val) in env.iteritems()]))
return env
def run(self, *args):
try:
cmd = [self._php_path, self._composer_path]
cmd.extend(args)
self._log.debug("Running command [%s]", ' '.join(cmd))
stream_output(sys.stdout,
' '.join(cmd),
env=self._build_composer_environment(),
cwd=self._ctx['BUILD_DIR'],
shell=True)
except:
print "-----> Composer command failed"
raise
class HHVMComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'hhvm', 'usr', 'bin', 'hhvm')
def write_config(self, builder):
pass
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'hhvm', 'usr', 'lib', 'hhvm')
class PHPComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'bin', 'php')
def write_config(self, builder):
# rewrite a temp copy of php.ini for use by composer
(builder.copy()
.under('{BUILD_DIR}/php/etc')
.where_name_is('php.ini')
.into('TMPDIR')
.done())
utils.rewrite_cfgs(os.path.join(self._ctx['TMPDIR'], 'php.ini'),
{'TMPDIR': self._ctx['TMPDIR'],
'HOME': self._ctx['BUILD_DIR']},
delim='@')
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'lib')
# Extension Methods
def configure(ctx):
config = ComposerConfiguration(ctx)
config.configure()
def preprocess_commands(ctx):
composer = ComposerExtension(ctx)
return composer.preprocess_commands()
def service_commands(ctx):
composer = ComposerExtension(ctx)
return composer.service_commands()
def service_environment(ctx):
composer = ComposerExtension(ctx)
return composer.service_environment()
def compile(install):
composer = ComposerExtension(install.builder._ctx)
return composer.compile(install)
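# --- Usage sketch (illustrative, not part of the buildpack) ---
# find_composer_paths() only reads 'BUILD_DIR' and 'WEBDIR' from the context
# (plus the optional COMPOSER_PATH environment variable); the paths below are
# assumptions for demonstration only.
if __name__ == '__main__':
    demo_ctx = {'BUILD_DIR': '/tmp/staged/app', 'WEBDIR': 'htdocs'}
    json_path, lock_path = find_composer_paths(demo_ctx)
    # each value is an absolute path to composer.json / composer.lock, or None
    # when the corresponding file is not present under the staged app
    print json_path, lock_path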
| apache-2.0 |
pipermerriam/django | django/contrib/auth/migrations/0001_initial.py | 108 | 4524 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('content_type', models.ForeignKey(
to='contenttypes.ContentType',
on_delete=models.CASCADE,
to_field='id',
verbose_name='content type',
)),
('codename', models.CharField(max_length=100, verbose_name='codename')),
],
options={
'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
'unique_together': set([('content_type', 'codename')]),
'verbose_name': 'permission',
'verbose_name_plural': 'permissions',
},
managers=[
('objects', django.contrib.auth.models.PermissionManager()),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
],
options={
'verbose_name': 'group',
'verbose_name_plural': 'groups',
},
managers=[
('objects', django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user')),
('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user')),
],
options={
'swappable': 'AUTH_USER_MODEL',
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| bsd-3-clause |
blackye/luscan-devel | thirdparty_libs/yaml/parser.py | 409 | 25542 |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
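# Illustrative sketch of the event stream this grammar yields (hedged: it
# assumes the package-level helper yaml.parse(), which drives Parser.get_event()
# through the usual Reader/Scanner pipeline):
#
#     import yaml
#     for event in yaml.parse("- foo\n- bar: 1\n"):
#         print event
#
# emits, in order: StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
# ScalarEvent(u'foo'), MappingStartEvent, ScalarEvent(u'bar'), ScalarEvent(u'1'),
# MappingEndEvent, SequenceEndEvent, DocumentEndEvent, StreamEndEvent.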
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
# Since writing a recursive-descendant parser is a straightforward task, we
# do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle.encode('utf-8'),
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
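    # Worked example for the directive handling above (the document text is
    # hypothetical): a stream beginning with
    #
    #     %YAML 1.1
    #     %TAG !e! tag:example.com,2000:app/
    #     --- !e!point {x: 1, y: 2}
    #
    # leaves yaml_version == (1, 1) and adds u'!e!' -> u'tag:example.com,2000:app/'
    # to tag_handles (alongside DEFAULT_TAGS), so parse_node() later resolves the
    # node tag to u'tag:example.com,2000:app/point'.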
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle.encode('utf-8'),
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == u'!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == u'!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == u'!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), u'',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# Note that while production rules for both flow_sequence_entry and
# flow_mapping_entry are equal, their interpretations are different.
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), u'', mark, mark)
| gpl-2.0 |
allinpaybusiness/ACS | allinpay projects/creditscoreMLP/classMLP.py | 1 | 9585 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import sys
import os
sys.path.append("allinpay projects")
from creditscore.creditscore import CreditScore
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
class CreditScoreMLP(CreditScore):
def MLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
        # Split the dataset into training and test sets
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)
        # Coarse-bin the training-set variables and apply the WOE transformation, then apply the same binning/WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
        # Feature selection on the training set, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
        # Train the model and generate predictions
        classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, alpha=alpha, max_iter=max_iter)  # remaining MLPClassifier parameters keep their defaults
        # A single MLP fit may not converge, so train 10 times and average the predicted probabilities
probability = 0
for i in range(10):
            # Fit the model
classifier.fit(X_train1, y_train)
            # Accumulate the predicted probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
predresult = pd.DataFrame({'target' : y_test, 'probability' : probability})
return predresult
def MLP_trainandtest_kfold(self, nsplit, cv, feature_sel, varthreshold, activation,solver, alpha, max_iter =1000,nclusters=10, cmethod=None, *hidden_layer_sizes):
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
        # Split the data into k folds; each fold in turn serves as the test set while the remaining data form the training set
kf = KFold(n_splits=nsplit, shuffle=True)
predresult = pd.DataFrame()
for train_index, test_index in kf.split(data_feature):
X_train, X_test = data_feature.iloc[train_index, ], data_feature.iloc[test_index, ]
y_train, y_test = data_target.iloc[train_index, ], data_target.iloc[test_index, ]
            # Skip this fold if random sampling leaves only one class in the train or test target
if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
continue
            # Coarse-bin the training-set variables and apply the WOE transformation, then apply the same binning/WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
            # Feature selection on the training set, using methods from sklearn.feature_selection
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
            # Train the model and generate predictions
            classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, activation=activation, solver=solver, alpha=alpha, max_iter=max_iter)  # remaining MLPClassifier parameters keep their defaults
            # A single MLP fit may not converge, so train 10 times and average the predicted probabilities
probability = 0
for i in range(10):
                # Fit the model
classifier.fit(X_train1, y_train)
                # Accumulate the predicted probabilities
probability += classifier.predict_proba(X_test1)[:,1]
probability = probability / 10
temp = pd.DataFrame({'target' : y_test, 'probability' : probability})
predresult = pd.concat([predresult, temp], ignore_index = True)
return predresult
def loopMLP_trainandtest(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of neurons in the hidden layer
hidden_layer_sizes = (i,)
            # Split into train/test and run the evaluation
predresult = self.MLP_trainandtest(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and store the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
def loopMLP_trainandtest_kfold(self, testsize, cv, feature_sel, varthreshold, activation, solver,alpha, max_iter =1000, nclusters=10, cmethod=None):
df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of neurons in the hidden layer
hidden_layer_sizes = (i,)
            # Split into train/test and run the evaluation
predresult = self.MLP_trainandtest_kfold(testsize, cv, feature_sel, varthreshold, activation,solver ,alpha, max_iter,nclusters, cmethod, *hidden_layer_sizes)
            # Evaluate and store the test results
auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
temp = pd.DataFrame({'hidden_first_layer' : i, 'auc_value' : auc ,'ks_value' :ks ,'p0=0.5' :metrics_p['accuracy'][5]} ,index=[0])
df = pd.concat([df, temp], ignore_index = False)
print('num %s complete' %i)
time0 = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
exist = os.path.exists('d:/ACS_CSVS')
if exist:
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
else:
os.makedirs('d:/ACS_CSVS/')
df.to_csv('d:/ACS_CSVS/'+time0+'_MLP.csv',index=False,sep=',')
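# Illustrative usage sketch (hypothetical: the CreditScore base-class
# constructor and its dataset argument are defined elsewhere in this project,
# so the call below is an assumption). Because hidden_layer_sizes is gathered
# through *args, the earlier default parameters have to be passed positionally:
#
#     model = CreditScoreMLP('HMEQ')      # hypothetical dataset label
#     predresult = model.MLP_trainandtest(
#         0.25, 10, 'RFECV', 0,           # testsize, cv, feature_sel, varthreshold
#         'relu', 'adam', 1e-4,           # activation, solver, alpha
#         1000, 10, None,                 # max_iter, nclusters, cmethod
#         64)                             # *hidden_layer_sizes -> (64,)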
| apache-2.0 |
jlmucb/cloudproxy | src/third_party/googlemock/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| apache-2.0 |
silizium/ardupilot | Tools/scripts/frame_sizes.py | 351 | 1117 | #!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile("^\s*\d+:/")
frame_line = re.compile("^\s*\d+\s+/\* frame size = (\d+) \*/")
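# Illustrative sketch (hedged; the exact .lst layout depends on the toolchain
# that produced the listing): a source line such as
#
#     144:/ardupilot/libraries/AP_Math/matrix3.cpp ****  void Matrix3f::rotate(...)
#
# matches code_line, and an assembler annotation such as
#
#     152              /* frame size = 48 */
#
# matches frame_line, attributing a 48-byte stack frame to that code line.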
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
| gpl-3.0 |
mlperf/training_results_v0.7 | SIAT/benchmarks/resnet/implementations/tensorflow_open_src/resnet.py | 1 | 21852 | import tensorflow as tf
# from mlperf_compliance import mlperf_log
# from mlperf_compliance import resnet_log_helper
# from configs.res50.res50_config import res50_config
_BATCH_NORM_EPSILON = 1e-4
_BATCH_NORM_DECAY = 0.9
ML_PERF_LOG=False
class LayerBuilder(object):
def __init__(self, activation=None, data_format='channels_last',
training=False, use_batch_norm=False, batch_norm_config=None,
conv_initializer=None, bn_init_mode='adv_bn_init', bn_gamma_initial_value=1.0 ):
self.activation = activation
self.data_format = data_format
self.training = training
self.use_batch_norm = use_batch_norm
self.batch_norm_config = batch_norm_config
self.conv_initializer = conv_initializer
self.bn_init_mode = bn_init_mode
self.bn_gamma_initial_value = bn_gamma_initial_value
if self.batch_norm_config is None:
self.batch_norm_config = {
'decay': _BATCH_NORM_DECAY,
'epsilon': _BATCH_NORM_EPSILON,
'scale': True,
'zero_debias_moving_mean': False,
}
def _conv2d(self, inputs, activation, *args, **kwargs):
x = tf.layers.conv2d(
inputs, data_format=self.data_format,
use_bias=not self.use_batch_norm,
kernel_initializer=self.conv_initializer,
activation=None if self.use_batch_norm else activation,
*args, **kwargs)
if self.use_batch_norm:
param_initializers = {
'moving_mean': tf.zeros_initializer(),
'moving_variance': tf.ones_initializer(),
'beta': tf.zeros_initializer(),
}
if self.bn_init_mode == 'adv_bn_init':
param_initializers['gamma'] = tf.ones_initializer()
elif self.bn_init_mode == 'conv_bn_init':
param_initializers['gamma'] = tf.constant_initializer(self.bn_gamma_initial_value)
else:
raise ValueError("--bn_init_mode must be 'conv_bn_init' or 'adv_bn_init' ")
            x = self.batch_norm(x, param_initializers=param_initializers)
x = activation(x) if activation is not None else x
return x
def conv2d_linear_last_bn(self, inputs, *args, **kwargs):
x = tf.layers.conv2d(
inputs, data_format=self.data_format,
use_bias=False,
kernel_initializer=self.conv_initializer,
activation=None, *args, **kwargs)
param_initializers = {
'moving_mean': tf.zeros_initializer(),
'moving_variance': tf.ones_initializer(),
'beta': tf.zeros_initializer(),
}
if self.bn_init_mode == 'adv_bn_init':
param_initializers['gamma'] = tf.zeros_initializer()
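            # Zero-initializing gamma makes the last batch norm of the residual
            # branch output zeros at the start of training, so each bottleneck
            # initially reduces to its shortcut (identity) path; this is the
            # usual motivation for an 'adv_bn_init'-style scheme (a hedged
            # reading of the flag name, not a claim from the original authors).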
elif self.bn_init_mode == 'conv_bn_init':
param_initializers['gamma'] = tf.constant_initializer(self.bn_gamma_initial_value)
else:
raise ValueError("--bn_init_mode must be 'conv_bn_init' or 'adv_bn_init' ")
x = self.batch_norm(x, param_initializers=param_initializers)
return x
def conv2d_linear(self, inputs, *args, **kwargs):
return self._conv2d(inputs, None, *args, **kwargs)
def conv2d(self, inputs, *args, **kwargs):
return self._conv2d(inputs, self.activation, *args, **kwargs)
def pad2d(self, inputs, begin, end=None):
if end is None:
end = begin
try:
_ = begin[1]
except TypeError:
begin = [begin, begin]
try:
_ = end[1]
except TypeError:
end = [end, end]
if self.data_format == 'channels_last':
padding = [[0, 0], [begin[0], end[0]], [begin[1], end[1]], [0, 0]]
else:
padding = [[0, 0], [0, 0], [begin[0], end[0]], [begin[1], end[1]]]
return tf.pad(inputs, padding)
def max_pooling2d(self, inputs, *args, **kwargs):
return tf.layers.max_pooling2d(
inputs, data_format=self.data_format, *args, **kwargs)
def average_pooling2d(self, inputs, *args, **kwargs):
return tf.layers.average_pooling2d(
inputs, data_format=self.data_format, *args, **kwargs)
def dense_linear(self, inputs, units, **kwargs):
return tf.layers.dense(inputs, units, activation=None)
def dense(self, inputs, units, **kwargs):
return tf.layers.dense(inputs, units, activation=self.activation)
def activate(self, inputs, activation=None):
activation = activation or self.activation
return activation(inputs) if activation is not None else inputs
def batch_norm(self, inputs, **kwargs):
all_kwargs = dict(self.batch_norm_config)
all_kwargs.update(kwargs)
data_format = 'NHWC' if self.data_format == 'channels_last' else 'NCHW'
bn_inputs = inputs
outputs = tf.contrib.layers.batch_norm(
inputs, is_training=self.training, data_format=data_format,
fused=True, **all_kwargs)
if ML_PERF_LOG:
resnet_log_helper.log_batch_norm(
input_tensor=bn_inputs, output_tensor=outputs, momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=self.training)
return outputs
def spatial_average2d(self, inputs):
shape = inputs.get_shape().as_list()
if self.data_format == 'channels_last':
n, h, w, c = shape
else:
n, c, h, w = shape
n = -1 if n is None else n
# x = tf.layers.average_pooling2d(inputs, (h, w), (1, 1),
# data_format=self.data_format)
        axis = [1, 2] if self.data_format == 'channels_last' else [2, 3]
        x = tf.reduce_mean(inputs, axis, keepdims=True)
        return tf.reshape(x, [n, c])
def flatten2d(self, inputs):
x = inputs
if self.data_format != 'channels_last':
# Note: This ensures the output order matches that of NHWC networks
x = tf.transpose(x, [0, 2, 3, 1])
input_shape = x.get_shape().as_list()
num_inputs = 1
for dim in input_shape[1:]:
num_inputs *= dim
return tf.reshape(x, [-1, num_inputs], name='flatten')
def residual2d(self, inputs, network, units=None, scale=1.0, activate=False):
outputs = network(inputs)
c_axis = -1 if self.data_format == 'channels_last' else 1
h_axis = 1 if self.data_format == 'channels_last' else 2
w_axis = h_axis + 1
ishape, oshape = [y.get_shape().as_list() for y in [inputs, outputs]]
ichans, ochans = ishape[c_axis], oshape[c_axis]
strides = ((ishape[h_axis] - 1) // oshape[h_axis] + 1,
(ishape[w_axis] - 1) // oshape[w_axis] + 1)
with tf.name_scope('residual'):
if (ochans != ichans or strides[0] != 1 or strides[1] != 1):
inputs = self.conv2d_linear(inputs, units, 1, strides, 'SAME')
x = inputs + scale * outputs
if activate:
x = self.activate(x)
return x
def Squeeze_excitation_layer(self, inputs):
se_ratio = 4
shape = inputs.get_shape().as_list()
n, h, w, c = shape # for channels last
with tf.name_scope('SE_block'):
x = self.spatial_average2d(inputs)
x = tf.layers.dense(inputs=x, units=c/se_ratio)
x = tf.nn.relu(x)
x = tf.layers.dense(inputs=x, units=c)
x = tf.nn.sigmoid(x)
x = tf.reshape(x, [-1,1,1,c]) # for channels last
scaled_outputs = inputs * x
return scaled_outputs
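    # Shape walk-through for the SE block above (illustrative): with a
    # channels-last input of shape (N, 56, 56, 256) and se_ratio = 4, the
    # squeeze step yields (N, 256), the two dense layers map it to (N, 64) and
    # back to (N, 256), and the sigmoid gate is reshaped to (N, 1, 1, 256)
    # before rescaling the input channel-wise.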
def resnet_bottleneck_v1_with_senet(builder, inputs, depth, depth_bottleneck, stride, filters, arch_type,
basic=False):
num_inputs = inputs.get_shape().as_list()[3]
x = inputs
print ('=== inputs and num_inputs:', num_inputs, inputs)
with tf.name_scope('resnet_v1'):
if ML_PERF_LOG:
resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
if depth == num_inputs:
if stride == 1:#v1.5
shortcut = x
else:#v1
shortcut = builder.max_pooling2d(x, 1, stride)
else:
if 'D1' in arch_type:
shortcut = builder.average_pooling2d(x, stride, stride)
shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
else:
shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
conv_input = x
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=shortcut, stride=stride,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
if basic:
x = builder.pad2d(x, 1)
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
else:
conv_input = x
x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
conv_input = x
if stride == 1:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
else:
if 'E2' in arch_type:
x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
x = builder.average_pooling2d(x, stride, stride)
else:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=stride,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
# x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
conv_input = x
x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
            x = builder.Squeeze_excitation_layer(x)  # add the SE (squeeze-and-excitation) block at the end of the bottleneck
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
x = tf.nn.relu(x + shortcut)
if ML_PERF_LOG:
resnet_log_helper.log_end_block(output_tensor=x)
return x
# num_inputs = inputs.get_shape().as_list()[3]
# x = inputs
# print ('=== inputs and num_inputs:', num_inputs, inputs)
# with tf.name_scope('resnet_v1'):
# if ML_PERF_LOG:
# resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
# if depth == num_inputs:
# if stride == 1:#v1.5
# shortcut = x
# else:#v1
# shortcut = builder.max_pooling2d(x, 1, stride)
# else:
# if 'D1' in arch_type:
# shortcut = builder.average_pooling2d(x, stride, stride)
# shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
# else:
# shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
#
# conv_input = x
# if ML_PERF_LOG:
# resnet_log_helper.log_conv2d(
# input_tensor=conv_input, output_tensor=shortcut, stride=stride,
# filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
# resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
# if basic:
# x = builder.pad2d(x, 1)
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
# x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
# else:
# conv_input = x
# x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
# conv_input = x
#
# if stride == 1:
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
# else:
# if 'E2' in arch_type:
# x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
# x = builder.average_pooling2d(x, stride, stride)
# else:
# x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
#
# # x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
# conv_input = x
# x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
    # x = builder.Squeeze_excitation_layer(x)  # add the SE block at the end of the bottleneck
#
#
#
# x = tf.nn.relu(x + shortcut)
# if ML_PERF_LOG:
# resnet_log_helper.log_end_block(output_tensor=x)
# return x
def resnet_bottleneck_v1(builder, inputs, depth, depth_bottleneck, stride, filters, arch_type,
basic=False):
num_inputs = inputs.get_shape().as_list()[3]
x = inputs
print ('=== inputs and num_inputs:', num_inputs, inputs)
with tf.name_scope('resnet_v1'):
if ML_PERF_LOG:
resnet_log_helper.log_begin_block(input_tensor=x, block_type=mlperf_log.BOTTLENECK_BLOCK)
if depth == num_inputs:
if stride == 1:#v1.5
shortcut = x
else:#v1
shortcut = builder.max_pooling2d(x, 1, stride)
else:
if 'D1' in arch_type:
shortcut = builder.average_pooling2d(x, stride, stride)
shortcut = builder.conv2d_linear( shortcut, depth, 1,1,'SAME' )
else:
shortcut = builder.conv2d_linear(x, depth, 1, stride, 'SAME')
conv_input = x
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=shortcut, stride=stride,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
resnet_log_helper.log_projection(input_tensor=conv_input, output_tensor=shortcut)
if basic:
x = builder.pad2d(x, 1)
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'VALID')
x = builder.conv2d_linear(x, depth, 3, 1, 'SAME')
else:
conv_input = x
x = builder.conv2d(x, depth_bottleneck, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
conv_input = x
if stride == 1:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
else:
if 'E2' in arch_type:
x = builder.conv2d(x, depth_bottleneck, 3, 1, 'SAME')
x = builder.average_pooling2d(x, stride, stride)
else:
x = builder.conv2d(x, depth_bottleneck, 3, stride, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=stride,
filters=filters, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
# x = builder.conv2d_linear(x, depth, 1, 1, 'SAME')
conv_input = x
x = builder.conv2d_linear_last_bn(x, depth, 1, 1, 'SAME')
if ML_PERF_LOG:
resnet_log_helper.log_conv2d(
input_tensor=conv_input, output_tensor=x, stride=1,
filters=filters*4, initializer=mlperf_log.TRUNCATED_NORMAL, use_bias=False)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
x = tf.nn.relu(x + shortcut)
if ML_PERF_LOG:
resnet_log_helper.log_end_block(output_tensor=x)
return x
def inference_resnet_v1_impl(builder, inputs, layer_counts, arch_type='ori+C1+D1+E2+4se', resnet_version='v1.5', basic=False):
#def inference_resnet_v1_impl(builder, inputs, layer_counts, arch_type='ori', resnet_version='v1.5', basic=False):
x = inputs
if 'C1' in arch_type:
x = builder.conv2d( x, 32, 3, 2, 'SAME' )
x = builder.conv2d( x, 32, 3, 1, 'SAME' )
x = builder.conv2d( x, 64, 3, 1, 'SAME' )
else:
x = builder.conv2d(x, 64, 7, 2, 'SAME')
num_filters=64
x, argmax = tf.nn.max_pool_with_argmax( input=x, ksize=(1,3,3,1), strides=(1,2,2,1), padding='SAME' )
if '4se' in arch_type:
with tf.name_scope('block_1'):
for i in range(layer_counts[0]):
# if i ==layer_counts[0]-1:
# x = resnet_bottleneck_v1_with_senet(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
# else:
x = resnet_bottleneck_v1(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_2'):
for i in range(layer_counts[1]):
num_filters=num_filters*2
# if i ==layer_counts[1]-1:
# x = resnet_bottleneck_v1_with_senet(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
# else:
x = resnet_bottleneck_v1(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_3'):
for i in range(layer_counts[2]):
num_filters=num_filters*2
if i ==layer_counts[2]-1:
x = resnet_bottleneck_v1_with_senet(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
x = resnet_bottleneck_v1(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
with tf.name_scope('block_4'):
for i in range(layer_counts[3]):
num_filters=num_filters*2
if i ==2:
x = resnet_bottleneck_v1_with_senet(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
x = resnet_bottleneck_v1(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
else:
for i in range(layer_counts[0]):
x = resnet_bottleneck_v1(builder, x, 256, 64, 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[1]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 512, 128, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[2]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 1024, 256, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
for i in range(layer_counts[3]):
num_filters=num_filters*2
x = resnet_bottleneck_v1(builder, x, 2048, 512, 2 if i == 0 else 1, num_filters, arch_type=arch_type, basic=basic)
# x = builder.spatial_average2d(x)
# same function as spatial average
axis = [1,2]
x = tf.reduce_mean( x, axis, keepdims=True )
x = tf.reshape(x, [-1,2048])
logits = tf.layers.dense(x, 1001,
kernel_initializer=tf.random_normal_initializer(stddev=0.01, seed=1))
return logits
def inference_resnet_v1(config, inputs, nlayer, arch_type, data_format='channels_last',
training=False, conv_initializer=None, bn_init_mode='adv_bn_init', bn_gamma_initial_value=1.0 ):
"""Deep Residual Networks family of models
https://arxiv.org/abs/1512.03385
"""
if ML_PERF_LOG:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_INITIAL_SHAPE,
value=inputs.shape.as_list()[1:])
builder = LayerBuilder(tf.nn.relu, data_format, training, use_batch_norm=True,
conv_initializer=conv_initializer, bn_init_mode=bn_init_mode, bn_gamma_initial_value=bn_gamma_initial_value)
if nlayer == 18:
return inference_resnet_v1_impl(builder, inputs, [2, 2, 2, 2], arch_type, basic=True)
elif nlayer == 34:
        return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3], arch_type, basic=True)
    elif nlayer == 50:
        return inference_resnet_v1_impl(builder, inputs, [3, 4, 6, 3], arch_type)
    elif nlayer == 101:
        return inference_resnet_v1_impl(builder, inputs, [3, 4, 23, 3], arch_type)
    elif nlayer == 152:
        return inference_resnet_v1_impl(builder, inputs, [3, 8, 36, 3], arch_type)
else:
raise ValueError("Invalid nlayer (%i); must be one of: 18,34,50,101,152" %
nlayer)
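# Illustrative usage sketch (hedged: this is TF1-style graph code, the
# initializer choice is an assumption, and the first (config) argument is not
# used inside this function body):
#
#     images = tf.placeholder(tf.float32, [None, 224, 224, 3])   # NHWC batch
#     logits = inference_resnet_v1(None, images, nlayer=50,
#                                  arch_type='ori+C1+D1+E2+4se', training=True,
#                                  conv_initializer=tf.variance_scaling_initializer())
#     # logits: [batch, 1001] class scores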
| apache-2.0 |
jmvalin/aom | tools/intersect-diffs.py | 98 | 2364 | #!/usr/bin/env python
## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
"""Calculates the "intersection" of two unified diffs.
Given two diffs, A and B, it finds all hunks in B that had non-context lines
in A and prints them to stdout. This is useful to determine the hunks in B that
are relevant to A. The resulting file can be applied with patch(1) on top of A.
"""
__author__ = "[email protected]"
import sys
import diff
def FormatDiffHunks(hunks):
"""Re-serialize a list of DiffHunks."""
r = []
last_header = None
for hunk in hunks:
this_header = hunk.header[0:2]
if last_header != this_header:
r.extend(hunk.header)
last_header = this_header
else:
r.extend(hunk.header[2])
r.extend(hunk.lines)
r.append("\n")
return "".join(r)
def ZipHunks(rhs_hunks, lhs_hunks):
"""Join two hunk lists on filename."""
for rhs_hunk in rhs_hunks:
rhs_file = rhs_hunk.right.filename.split("/")[1:]
for lhs_hunk in lhs_hunks:
lhs_file = lhs_hunk.left.filename.split("/")[1:]
if lhs_file != rhs_file:
continue
yield (rhs_hunk, lhs_hunk)
def main():
old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))]
new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))]
out_hunks = []
# Join the right hand side of the older diff with the left hand side of the
# newer diff.
for old_hunk, new_hunk in ZipHunks(old_hunks, new_hunks):
if new_hunk in out_hunks:
continue
old_lines = old_hunk.right
new_lines = new_hunk.left
# Determine if this hunk overlaps any non-context line from the other
for i in old_lines.delta_line_nums:
if i in new_lines:
out_hunks.append(new_hunk)
break
if out_hunks:
print FormatDiffHunks(out_hunks)
sys.exit(1)
if __name__ == "__main__":
main()
| bsd-3-clause |
surchs/brainbox | visu/base.py | 1 | 8414 | __author__ = 'surchs'
import sys
import numpy as np
from matplotlib import gridspec
from nilearn import plotting as nlp
from matplotlib import pyplot as plt
from matplotlib import colors as mpc
def add_subplot_axes(ax, rect, axisbg='w'):
fig = plt.gcf()
box = ax.get_position()
width = box.width
height = box.height
inax_position = ax.transAxes.transform(rect[0:2])
trans_figure = fig.transFigure.inverted()
infig_position = trans_figure.transform(inax_position)
x = infig_position[0]
y = infig_position[1]
width *= rect[2]
height *= rect[3]
subax = fig.add_axes([x, y, width, height], axisbg=axisbg)
return subax
def add_four_grid(ax, dist=0.05, ticks=False, border=False, titles=None):
"""
Function that creates a symmetric four grid inside a subplot
:param ax: Axis handle of parent subplot
:param dist: Distance between neighbouring fields of the grd
:param ticks: True if ticks shall be visible
:param border: True if border shall be visible
:param titles: Iterable with length 4 in this order:
0) top left
1) bottom left
2) top right
3) bottom right
If set, distance the fields will be made narrower to
accommodate the title
:return: Axis handles for the four subfields in this order:
0) top left
1) bottom left
2) top right
3) bottom right
"""
# See if titles are provided for all subplots
if titles and len(titles) == 4:
title = True
else:
title = False
# Make left top plot
lt = add_subplot_axes(ax, [0, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lt.set_title(titles[0])
if not ticks:
lt.set_xticks([])
lt.set_yticks([])
if not border:
lt.spines["top"].set_visible(False)
lt.spines["right"].set_visible(False)
lt.spines["left"].set_visible(False)
lt.spines["bottom"].set_visible(False)
# Make left bottom plot
lb = add_subplot_axes(ax, [0, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
lb.set_title(titles[1])
if not ticks:
lb.set_xticks([])
lb.set_yticks([])
if not border:
lb.spines["top"].set_visible(False)
lb.spines["right"].set_visible(False)
lb.spines["left"].set_visible(False)
lb.spines["bottom"].set_visible(False)
# Make right top plot
rt = add_subplot_axes(ax, [0.5+dist/2, 0.5+dist/2,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rt.set_title(titles[2])
    if not ticks:
rt.set_xticks([])
rt.set_yticks([])
if not border:
rt.spines["top"].set_visible(False)
rt.spines["right"].set_visible(False)
rt.spines["left"].set_visible(False)
rt.spines["bottom"].set_visible(False)
# Make right bottom plot
rb = add_subplot_axes(ax, [0.5+dist/2, 0,
0.5-dist/(2-title), 0.5-dist/(2-title)])
if title:
rb.set_title(titles[3])
if not ticks:
rb.set_xticks([])
rb.set_yticks([])
if not border:
rb.spines["top"].set_visible(False)
rb.spines["right"].set_visible(False)
rb.spines["left"].set_visible(False)
rb.spines["bottom"].set_visible(False)
return lt, lb, rt, rb
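# A minimal usage sketch for add_four_grid; the helper below and its synthetic
# data are illustrative only and are not used elsewhere in this module:
def _example_add_four_grid():
    fig, ax = plt.subplots(figsize=(6, 6))
    ax.set_xticks([])
    ax.set_yticks([])
    quads = add_four_grid(ax, dist=0.1, ticks=True,
                          titles=('top left', 'bottom left',
                                  'top right', 'bottom right'))
    for quad in quads:
        quad.plot(np.random.rand(10))
    return fig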
def make_montage(vol, axis='coronal', x_step=5, y_step=6):
"""
Makes a montage of a 3D volume
"""
n_steps = x_step * y_step
if axis == 'coronal':
it_dim = vol.shape[1]
x_dim = vol.shape[0]
y_dim = vol.shape[2]
elif axis == 'axial':
it_dim = vol.shape[0]
x_dim = vol.shape[1]
y_dim = vol.shape[2]
vis_mat = np.zeros((x_step*x_dim, y_step*y_dim))
it_slc = np.linspace(0, it_dim-1, n_steps)
itc = 0
for y in np.arange(y_step):
for x in np.arange(x_step):
slc_ind = it_slc[itc]
            get_slc = int(np.floor(slc_ind))  # cast to int so it can be used as an array index
if axis == 'coronal':
slc = vol[:, get_slc, :]
elif axis == 'axial':
slc = vol[get_slc, ...]
vis_mat[x_dim * x:x_dim * (x + 1), y_dim * y:y_dim * (y + 1)] = slc
itc += 1
out_mat = np.fliplr(np.rot90(vis_mat))
return out_mat
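# A minimal sketch of make_montage; the random volume below merely stands in
# for a real 3D image and the helper is illustrative only:
def _example_make_montage():
    vol = np.random.rand(40, 48, 42)
    mat = make_montage(vol, axis='axial', x_step=5, y_step=6)
    plt.imshow(mat, cmap='gray')
    plt.axis('off')
    return mat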
def montage(img, thr=0, mode='coronal', rows=5, columns=6, fsz=(10, 20)):
"""
Make a montage using nilearn for the background
The output figure will be 5 slices wide and 6
slices deep
:param img: nilearn image containing the data
:param thr: threshold for the image
    :param mode: view mode: 'saggital', 'coronal' or 'axial'
    :param rows: number of rows in the figure
    :param columns: number of columns in the figure
:param fsz: size of the figure
:return fig: figure handle for saving or whatnot
"""
# Hardwired view range
sag_rng = [-65, 65]
cor_rng = [-100, 65]
axi_rng = [-71, 85]
# Get the number of slices
    n_slices = rows * columns
if mode == 'coronal':
# Get the slice indices
view_range = np.floor(np.linspace(cor_rng[0], cor_rng[1], n_slices))
view_mode = 'y'
if mode == 'axial':
# Get the slice indices
view_range = np.floor(np.linspace(axi_rng[0], axi_rng[1], n_slices))
view_mode = 'z'
if mode == 'saggital':
# Get the slice indices
view_range = np.floor(np.linspace(sag_rng[0], sag_rng[1], n_slices))
view_mode = 'x'
# Prepare the figure
fig = plt.figure(figsize=fsz)
    gs = gridspec.GridSpec(columns, 1, hspace=0, wspace=0)
# Loop through the rows of the image
    for row_id in range(columns):
# Create the axis to show
ax = fig.add_subplot(gs[row_id, 0])
# Get the slices in the column direction
row_range = view_range[row_id*rows:(row_id+1)*rows]
# Display the thing
nlp.plot_stat_map(img, cut_coords=row_range,
display_mode=view_mode, threshold=thr,
axes=ax, black_bg=True)
return fig
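# A minimal sketch of the montage wrapper; the helper and the file path are
# placeholders (any NIfTI statistical map on disk would do; nibabel is
# available wherever nilearn is installed):
def _example_montage(stat_map_path='stat_map.nii.gz'):
    import nibabel as nib
    img = nib.load(stat_map_path)
    fig = montage(img, thr=2.0, mode='axial')
    fig.savefig('montage_axial.png', dpi=150)
    return fig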
def make_cmap(colors, position=None, bit=False):
"""
make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
"""
bit_rgb = np.linspace(0,1,256)
    if position is None:
        position = np.linspace(0, 1, len(colors))
    else:
        if len(position) != len(colors):
            sys.exit("position length must be the same as colors")
        elif position[0] != 0 or position[-1] != 1:
            sys.exit("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]])
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(position, colors):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpc.LinearSegmentedColormap('my_colormap',cdict,256)
return cmap
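# A minimal sketch of make_cmap with three 8-bit RGB colors; the palette and
# the helper are arbitrary illustrations, not a recommended colormap:
def _example_make_cmap():
    colors = [(0, 0, 255), (255, 255, 255), (255, 0, 0)]
    cmap = make_cmap(colors, position=[0, 0.5, 1], bit=True)
    plt.imshow(np.random.randn(20, 20), cmap=cmap)
    plt.colorbar()
    return cmap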
def hot_cold():
"""
This generates a niak-like colormap of hot cold
:return:
"""
# Define a new colormap
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(0.25, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 1.0, 1.0),
(0.25, 1.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0))
}
hotcold = mpc.LinearSegmentedColormap('hotcold', cdict)
return hotcold
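# A short illustrative sketch applying the hot_cold colormap to a symmetric
# random map (the helper is not part of the module's public interface):
def _example_hot_cold():
    data = np.random.randn(30, 30)
    plt.imshow(data, cmap=hot_cold(), vmin=-3, vmax=3)
    plt.colorbar()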
| mit |