repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
aplanas/rally | tests/unit/verification/test_diff.py | 20 | 4951 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.verification.tempest import diff
from tests.unit import test
class DiffTestCase(test.TestCase):
def test_main(self):
results1 = {"test.NONE": {"name": "test.NONE",
"output": "test.NONE",
"status": "SKIPPED",
"time": 0.000},
"test.zerofive": {"name": "test.zerofive",
"output": "test.zerofive",
"status": "FAILED",
"time": 0.05},
"test.one": {"name": "test.one",
"output": "test.one",
"status": "OK",
"time": 0.111},
"test.two": {"name": "test.two",
"output": "test.two",
"status": "OK",
"time": 0.222},
"test.three": {"name": "test.three",
"output": "test.three",
"status": "FAILED",
"time": 0.333},
"test.four": {"name": "test.four",
"output": "test.four",
"status": "OK",
"time": 0.444},
"test.five": {"name": "test.five",
"output": "test.five",
"status": "OK",
"time": 0.555}
}
results2 = {"test.one": {"name": "test.one",
"output": "test.one",
"status": "FAIL",
"time": 0.1111},
"test.two": {"name": "test.two",
"output": "test.two",
"status": "OK",
"time": 0.222},
"test.three": {"name": "test.three",
"output": "test.three",
"status": "OK",
"time": 0.3333},
"test.four": {"name": "test.four",
"output": "test.four",
"status": "FAIL",
"time": 0.4444},
"test.five": {"name": "test.five",
"output": "test.five",
"status": "OK",
"time": 0.555},
"test.six": {"name": "test.six",
"output": "test.six",
"status": "OK",
"time": 0.666},
"test.seven": {"name": "test.seven",
"output": "test.seven",
"status": "OK",
"time": 0.777}
}
diff_ = diff.Diff(results1, results2, 0)
assert len(diff_.diffs) == 10
assert len([test for test in diff_.diffs
if test["type"] == "removed_test"]) == 2
assert len([test for test in diff_.diffs
if test["type"] == "new_test"]) == 2
assert len([test for test in diff_.diffs
if test["type"] == "value_changed"]) == 6
assert diff_.to_csv() != ""
assert diff_.to_html() != ""
assert diff_.to_json() != ""
def test_zero_values(self):
results1 = {"test.one": {"name": "test.one",
"output": "test.one",
"status": "OK",
"time": 1}}
results2 = {"test.one": {"name": "test.one",
"output": "test.one",
"status": "FAIL",
"time": 0}}
# This must NOT raise ZeroDivisionError
diff_ = diff.Diff(results1, results2, 0)
self.assertEqual(2, len(diff_.diffs))
diff_ = diff.Diff(results2, results1, 0)
self.assertEqual(2, len(diff_.diffs))
| apache-2.0 | 6,392,971,523,426,498,000 | 45.271028 | 78 | 0.361139 | false |
NL66278/OCB | addons/l10n_fr_hr_payroll/l10n_fr_hr_payroll.py | 340 | 2012 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'plafond_secu': fields.float('Plafond de la Securite Sociale', digits_compute=dp.get_precision('Payroll')),
'nombre_employes': fields.integer('Nombre d\'employes'),
'cotisation_prevoyance': fields.float('Cotisation Patronale Prevoyance', digits_compute=dp.get_precision('Payroll')),
'org_ss': fields.char('Organisme de securite sociale'),
'conv_coll': fields.char('Convention collective'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_columns = {
'qualif': fields.char('Qualification'),
'niveau': fields.char('Niveau'),
'coef': fields.char('Coefficient'),
}
class hr_payslip(osv.osv):
_inherit = 'hr.payslip'
_columns = {
'payment_mode': fields.char('Mode de paiement'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,073,569,388,437,999,000 | 36.259259 | 125 | 0.629225 | false |
fusionbox/mezzanine | mezzanine/pages/migrations/south/0014_auto__add_field_page_created__add_field_page_updated.py | 8 | 6156 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Page.created'
db.add_column(u'pages_page', 'created',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Adding field 'Page.updated'
db.add_column(u'pages_page', 'updated',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Page.created'
db.delete_column(u'pages_page', 'created')
# Deleting field 'Page.updated'
db.delete_column(u'pages_page', 'updated')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'pages.link': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Link', '_ormbases': [u'pages.Page']},
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
#'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'pages.richtextpage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'RichTextPage', '_ormbases': [u'pages.Page']},
'content': ('mezzanine.core.fields.RichTextField', [], {}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['pages']
| bsd-2-clause | 2,893,206,036,587,356,000 | 63.8 | 176 | 0.542235 | false |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py | 326 | 6536 | import base64
import io
import json
import zlib
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle, text_type
def _b64_encode_bytes(b):
return base64.b64encode(b).decode("ascii")
def _b64_encode_str(s):
return _b64_encode_bytes(s.encode("utf8"))
def _b64_encode(s):
if isinstance(s, text_type):
return _b64_encode_str(s)
return _b64_encode_bytes(s)
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
def dumps(self, request, response, body=None):
response_headers = CaseInsensitiveDict(response.headers)
if body is None:
body = response.read(decode_content=False)
# NOTE: 99% sure this is dead code. I'm only leaving it
# here b/c I don't have a test yet to prove
# it. Basically, before using
# `cachecontrol.filewrapper.CallbackFileWrapper`,
# this made an effort to reset the file handle. The
# `CallbackFileWrapper` short circuits this code by
# setting the body as the content is consumed, the
# result being a `body` argument is *always* passed
# into cache_response, and in turn,
# `Serializer.dump`.
response._fp = io.BytesIO(body)
data = {
"response": {
"body": _b64_encode_bytes(body),
"headers": dict(
(_b64_encode(k), _b64_encode(v))
for k, v in response.headers.items()
),
"status": response.status,
"version": response.version,
"reason": _b64_encode_str(response.reason),
"strict": response.strict,
"decode_content": response.decode_content,
},
}
# Construct our vary headers
data["vary"] = {}
if "vary" in response_headers:
varied_headers = response_headers['vary'].split(',')
for header in varied_headers:
header = header.strip()
data["vary"][header] = request.headers.get(header, None)
# Encode our Vary headers to ensure they can be serialized as JSON
data["vary"] = dict(
(_b64_encode(k), _b64_encode(v) if v is not None else v)
for k, v in data["vary"].items()
)
return b",".join([
b"cc=2",
zlib.compress(
json.dumps(
data, separators=(",", ":"), sort_keys=True,
).encode("utf8"),
),
])
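    # Note (added): the serialized blob is the version tag "cc=2", a comma, and
    # zlib-compressed JSON; loads() below splits on the first comma to recover
    # that tag before dispatching to the matching _loads_vN method.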
def loads(self, request, data):
# Short circuit if we've been given an empty set of data
if not data:
return
# Determine what version of the serializer the data was serialized
# with
try:
ver, data = data.split(b",", 1)
except ValueError:
ver = b"cc=0"
# Make sure that our "ver" is actually a version and isn't a false
# positive from a , being in the data stream.
if ver[:3] != b"cc=":
data = ver + data
ver = b"cc=0"
# Get the version number out of the cc=N
ver = ver.split(b"=", 1)[-1].decode("ascii")
# Dispatch to the actual load method for the given version
try:
return getattr(self, "_loads_v{0}".format(ver))(request, data)
except AttributeError:
# This is a version we don't have a loads function for, so we'll
# just treat it as a miss and return None
return
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
if request.headers.get(header, None) != value:
return
body_raw = cached["response"].pop("body")
headers = CaseInsensitiveDict(data=cached['response']['headers'])
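        # Note (added): the cached body has already been fully read and
        # de-chunked, so a leftover "chunked" transfer-encoding header no
        # longer describes it and is dropped before rebuilding the response.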
if headers.get('transfer-encoding', '') == 'chunked':
headers.pop('transfer-encoding')
cached['response']['headers'] = headers
try:
body = io.BytesIO(body_raw)
except TypeError:
# This can happen if cachecontrol serialized to v1 format (pickle)
# using Python 2. A Python 2 str(byte string) will be unpickled as
# a Python 3 str (unicode string), which will cause the above to
# fail with:
#
# TypeError: 'str' does not support the buffer interface
body = io.BytesIO(body_raw.encode('utf8'))
return HTTPResponse(
body=body,
preload_content=False,
**cached["response"]
)
def _loads_v0(self, request, data):
# The original legacy cache data. This doesn't contain enough
# information to construct everything we need, so we'll treat this as
# a miss.
return
def _loads_v1(self, request, data):
try:
cached = pickle.loads(data)
except ValueError:
return
return self.prepare_response(request, cached)
def _loads_v2(self, request, data):
try:
cached = json.loads(zlib.decompress(data).decode("utf8"))
except ValueError:
return
# We need to decode the items that we've base64 encoded
cached["response"]["body"] = _b64_decode_bytes(
cached["response"]["body"]
)
cached["response"]["headers"] = dict(
(_b64_decode_str(k), _b64_decode_str(v))
for k, v in cached["response"]["headers"].items()
)
cached["response"]["reason"] = _b64_decode_str(
cached["response"]["reason"],
)
cached["vary"] = dict(
(_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
for k, v in cached["vary"].items()
)
return self.prepare_response(request, cached)
| bsd-3-clause | -4,706,950,758,376,271,000 | 32.346939 | 78 | 0.549266 | false |
clebergnu/autotest | client/tests/aio_dio_bugs/aio_dio_bugs.py | 6 | 1345 | import os
from autotest_lib.client.bin import test, utils
# tests is a simple array of "cmd" "arguments"
tests = [["aio-dio-invalidate-failure", "poo"],
["aio-dio-subblock-eof-read", "eoftest"],
["aio-free-ring-with-bogus-nr-pages", ""],
["aio-io-setup-with-nonwritable-context-pointer", ""],
["aio-dio-extend-stat", "file"],
]
name = 0
arglist = 1
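# Note (added): the 'name' and 'arglist' constants above index into each entry
# when execute() below builds the command line for a test binary.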
class aio_dio_bugs(test.test):
version = 5
preserve_srcdir = True
def initialize(self):
self.job.require_gcc()
self.job.setup_dep(['libaio'])
ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
cflags = '-I ' + self.autodir + '/deps/libaio/include'
self.gcc_flags = ldflags + ' ' + cflags
def setup(self):
os.chdir(self.srcdir)
utils.make('"CFLAGS=' + self.gcc_flags + '"')
def execute(self, args = ''):
os.chdir(self.tmpdir)
libs = self.autodir + '/deps/libaio/lib/'
ld_path = utils.prepend_path(libs,
utils.environ('LD_LIBRARY_PATH'))
var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
for test in tests:
cmd = self.srcdir + '/' + test[name] + ' ' + args + ' ' \
+ test[arglist]
utils.system(var_ld_path + ' ' + cmd)
| gpl-2.0 | 8,529,738,110,963,801,000 | 31.804878 | 78 | 0.524164 | false |
nurmd2/nurmd | addons/google_spreadsheet/google_spreadsheet.py | 41 | 4717 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import cgi
import json
import logging
from lxml import etree
import re
import werkzeug.urls
import urllib2
from openerp.osv import osv
from openerp.addons.google_account import TIMEOUT
_logger = logging.getLogger(__name__)
class config(osv.osv):
_inherit = 'google.drive.config'
def get_google_scope(self):
scope = super(config, self).get_google_scope()
return '%s https://spreadsheets.google.com/feeds' % scope
def write_config_formula(self, cr, uid, attachment_id, spreadsheet_key, model, domain, groupbys, view_id, context=None):
access_token = self.get_access_token(cr, uid, scope='https://spreadsheets.google.com/feeds', context=context)
fields = self.pool.get(model).fields_view_get(cr, uid, view_id=view_id, view_type='tree')
doc = etree.XML(fields.get('arch'))
display_fields = []
for node in doc.xpath("//field"):
if node.get('modifiers'):
modifiers = json.loads(node.get('modifiers'))
if not modifiers.get('invisible') and not modifiers.get('tree_invisible'):
display_fields.append(node.get('name'))
fields = " ".join(display_fields)
domain = domain.replace("'", r"\'").replace('"', "'")
if groupbys:
fields = "%s %s" % (groupbys, fields)
formula = '=oe_read_group("%s";"%s";"%s";"%s")' % (model, fields, groupbys, domain)
else:
formula = '=oe_browse("%s";"%s";"%s")' % (model, fields, domain)
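        # Illustrative example (added; values are not from the source): a model
        # "sale.order" with visible fields "name amount_total" and no group-by
        # yields =oe_browse("sale.order";"name amount_total";"[]").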
url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
dbname = cr.dbname
user = self.pool['res.users'].read(cr, uid, [uid], ['login', 'password'], context=context)[0]
username = user['login']
password = user['password']
if not password:
config_formula = '=oe_settings("%s";"%s")' % (url, dbname)
else:
config_formula = '=oe_settings("%s";"%s";"%s";"%s")' % (url, dbname, username, password)
request = '''<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>https://spreadsheets.google.com/feeds/cells/{key}/od6/private/full</id>
<entry>
<batch:id>A1</batch:id>
<batch:operation type="update"/>
<id>https://spreadsheets.google.com/feeds/cells/{key}/od6/private/full/R1C1</id>
<link rel="edit" type="application/atom+xml"
href="https://spreadsheets.google.com/feeds/cells/{key}/od6/private/full/R1C1"/>
<gs:cell row="1" col="1" inputValue="{formula}"/>
</entry>
<entry>
<batch:id>A2</batch:id>
<batch:operation type="update"/>
<id>https://spreadsheets.google.com/feeds/cells/{key}/od6/private/full/R60C15</id>
<link rel="edit" type="application/atom+xml"
href="https://spreadsheets.google.com/feeds/cells/{key}/od6/private/full/R60C15"/>
<gs:cell row="60" col="15" inputValue="{config}"/>
</entry>
</feed>''' .format(key=spreadsheet_key, formula=cgi.escape(formula, quote=True), config=cgi.escape(config_formula, quote=True))
try:
req = urllib2.Request(
'https://spreadsheets.google.com/feeds/cells/%s/od6/private/full/batch?%s' % (spreadsheet_key, werkzeug.url_encode({'v': 3, 'access_token': access_token})),
data=request,
headers={'content-type': 'application/atom+xml', 'If-Match': '*'})
urllib2.urlopen(req, timeout=TIMEOUT)
except (urllib2.HTTPError, urllib2.URLError):
_logger.warning("An error occured while writting the formula on the Google Spreadsheet.")
description = '''
formula: %s
''' % formula
if attachment_id:
self.pool['ir.attachment'].write(cr, uid, attachment_id, {'description': description}, context=context)
return True
def set_spreadsheet(self, cr, uid, model, domain, groupbys, view_id, context=None):
try:
config_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'google_spreadsheet', 'google_spreadsheet_template')[1]
except ValueError:
raise
config = self.browse(cr, uid, config_id, context=context)
title = 'Spreadsheet %s' % model
res = self.copy_doc(cr, uid, False, config.google_drive_resource_id, title, model, context=context)
mo = re.search("(key=|/d/)([A-Za-z0-9-_]+)", res['url'])
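        # Note (added): the regular expression accepts both "?key=<id>" and
        # "/d/<id>" style spreadsheet URLs; group(2) is the spreadsheet key
        # passed to write_config_formula() below.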
if mo:
key = mo.group(2)
self.write_config_formula(cr, uid, res.get('id'), key, model, domain, groupbys, view_id, context=context)
return res
| gpl-3.0 | -5,975,585,331,864,059,000 | 45.245098 | 172 | 0.61607 | false |
Fireblend/chromium-crosswalk | tools/telemetry/telemetry/util/find_dependencies.py | 5 | 9310 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.util import bootstrap
from telemetry.util import cloud_storage
from telemetry.util import path
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(os.path.realpath(os.path.join(
path.GetChromiumSrcDir(), os.pardir, deps_path))
for deps_path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not path.IsSubpath(module_path, path.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser, None)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
path.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
def FindExcludedFiles(files, options):
# Define some filters for files.
def IsHidden(path_string):
for pathname_component in path_string.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path_string):
return os.path.splitext(path_string)[1] == '.pyc'
def IsInCloudStorage(path_string):
return os.path.exists(path_string + '.sha1')
def MatchesExcludeOptions(path_string):
for pattern in options.exclude:
if (fnmatch.fnmatch(path_string, pattern) or
fnmatch.fnmatch(os.path.basename(path_string), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for file_path in files:
if any(condition(file_path) for condition in exclude_conditions):
yield file_path
def FindDependencies(target_paths, options):
# Verify arguments.
for target_path in target_paths:
if not os.path.exists(target_path):
raise ValueError('Path does not exist: %s' % target_path)
dependencies = path_set.PathSet()
# Including Telemetry's major entry points will (hopefully) include Telemetry
# and all its dependencies. If the user doesn't pass any arguments, we just
# have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(), 'telemetry', 'benchmark_runner.py')))
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(),
'telemetry', 'unittest_util', 'run_tests.py')))
dependencies |= FindBootstrapDependencies(path.GetTelemetryDir())
# Add dependencies.
for target_path in target_paths:
base_dir = os.path.dirname(os.path.realpath(target_path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(target_path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(target_paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(path.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for dependency_path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(dependency_path, base_dir))
zip_file.write(dependency_path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for target_path in target_paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
      # 0100000 (octal) marks a regular file; 0777 gives rwxrwxrwx permissions.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(target_path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for dependency_path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(dependency_path, gsutil_base_dir))
zip_file.write(dependency_path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args, _):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
target_paths = args.positional_args
dependencies = FindDependencies(target_paths, args)
if args.zip:
ZipDependencies(target_paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
| bsd-3-clause | -1,086,916,298,362,818,400 | 35.944444 | 79 | 0.694737 | false |
DrMeers/django | tests/order_with_respect_to/tests.py | 21 | 2849 | from __future__ import unicode_literals
from operator import attrgetter
from django.test import TestCase
from .models import Post, Question, Answer
class OrderWithRespectToTests(TestCase):
def test_basic(self):
q1 = Question.objects.create(text="Which Beatle starts with the letter 'R'?")
q2 = Question.objects.create(text="What is your name?")
Answer.objects.create(text="John", question=q1)
Answer.objects.create(text="Jonno", question=q2)
Answer.objects.create(text="Paul", question=q1)
Answer.objects.create(text="Paulo", question=q2)
Answer.objects.create(text="George", question=q1)
Answer.objects.create(text="Ringo", question=q1)
# The answers will always be ordered in the order they were inserted.
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Ringo",
],
attrgetter("text"),
)
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = Answer.objects.filter(question=q1)[0]
self.assertEqual(a1.text, "John")
a2 = a1.get_next_in_order()
self.assertEqual(a2.text, "Paul")
a4 = list(Answer.objects.filter(question=q1))[-1]
self.assertEqual(a4.text, "Ringo")
self.assertEqual(a4.get_previous_in_order().text, "George")
# Determining (and setting) the ordering for a particular item is also
# possible.
id_list = [o.pk for o in q1.answer_set.all()]
self.assertEqual(a2.question.get_answer_order(), id_list)
a5 = Answer.objects.create(text="Number five", question=q1)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
self.assertEqual(
a2.question.get_answer_order(), a5.question.get_answer_order()
)
# The ordering can be altered:
id_list = [o.pk for o in q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
self.assertNotEqual(a5.question.get_answer_order(), id_list)
a5.question.set_answer_order(id_list)
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Number five", "Ringo"
],
attrgetter("text")
)
def test_recursive_ordering(self):
p1 = Post.objects.create(title='1')
p2 = Post.objects.create(title='2')
p1_1 = Post.objects.create(title="1.1", parent=p1)
p1_2 = Post.objects.create(title="1.2", parent=p1)
Post.objects.create(title="2.1", parent=p2)
p1_3 = Post.objects.create(title="1.3", parent=p1)
self.assertEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
| bsd-3-clause | -7,001,601,081,572,186,000 | 38.027397 | 85 | 0.609688 | false |
hackerbot/DjangoDev | tests/postgres_tests/test_hstore.py | 5 | 7364 | import json
from django.core import exceptions, serializers
from . import PostgresSQLTestCase
from .models import HStoreModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgresSQLTestCase):
apps = ['django.contrib.postgres']
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, None)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
class TestQuerying(PostgresSQLTestCase):
def setUp(self):
self.objs = [
HStoreModel.objects.create(field={'a': 'b'}),
HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
HStoreModel.objects.create(field={'c': 'd'}),
HStoreModel.objects.create(field={}),
HStoreModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
class TestSerialization(PostgresSQLTestCase):
test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgresSQLTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(PostgresSQLTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
class TestValidator(PostgresSQLTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
| bsd-3-clause | -2,227,871,661,116,768,300 | 32.935484 | 114 | 0.601711 | false |
decebel/dataAtom_alpha | bin/plug/py/external/pattern/vector/svm/svm.py | 1 | 9178 | #!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
import sys
import os
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
for i, binary in enumerate((
# If you have OS X 32-bit, you need a 32-bit Python and libsvm-mac32.so.
# If you have OS X 32-bit with 64-bit Python,
# it will try to load libsvm-mac64.so which fails since OS X is 32-bit.
# It won't load libsvm-mac32.so since Python is 64-bit.
"libsvm-win32.dll", # 1) 32-bit Windows
"libsvm-mac32.so", # 2) 32-bit Mac OS X
"libsvm-mac64.so", # 3) 64-bit Mac OS X
"libsvm-ubuntu64.so")): # 4) 64-bit Linux Ubuntu
try:
libsvm = CDLL(os.path.join(os.path.dirname(__file__), binary)); break
except OSError, e:
if i == 4-1: # <= Adjust according to available binaries.
raise ImportError, "can't import libsvm (%sbit-%s)" % (
sizeof(c_voidp) * 8,
sys.platform)
# Construct constants
SVM_TYPE = ['C_SVC', 'NU_SVC', 'ONE_CLASS', 'EPSILON_SVR', 'NU_SVR' ]
KERNEL_TYPE = ['LINEAR', 'POLY', 'RBF', 'SIGMOID', 'PRECOMPUTED']
for i, s in enumerate(SVM_TYPE): exec("%s = %d" % (s , i))
for i, s in enumerate(KERNEL_TYPE): exec("%s = %d" % (s , i))
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def gen_svm_nodearray(xi, feature_max=None, issparse=None):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if issparse:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
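# Illustrative use (added, not from the original source):
# gen_svm_nodearray({1: 0.5, 3: 1.0}) returns a ctypes (svm_node * 3) array
# terminated by a sentinel node whose index is -1, together with the largest
# feature index seen (3 here).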
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x):
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def show(self):
attrs = svm_parameter._names + self.__dict__.keys()
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
print(' %s: %s' % (attr, val))
def set_to_default_values(self):
self.svm_type = C_SVC;
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = (c_int*0)()
self.weight = (c_double*0)()
self.cross_validation = False
self.nr_fold = 0
self.print_func = None
def parse_options(self, options):
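		# Note (added): options follow the libsvm command-line syntax; for example
		# (illustrative) "-s 0 -t 2 -c 10 -g 0.5" selects C_SVC with an RBF
		# kernel, C=10 and gamma=0.5.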
argv = options.split()
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
'probA', 'probB', 'label', 'nSV', 'free_sv']
_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
POINTER(POINTER(c_double)), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_int),
POINTER(c_int), c_int]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
try: libsvm.svm_free_and_destroy_model(pointer(self))
except:
pass
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def get_sv_coef(self):
return [tuple(self.sv_coef[j][i] for j in xrange(self.nr_class - 1))
for i in xrange(self.l)]
def get_SV(self):
result = []
for sparse_sv in self.SV[:self.l]:
row = dict()
i = 0
while True:
row[sparse_sv[i].index] = sparse_sv[i].value
if sparse_sv[i].index == -1:
break
i += 1
result.append(row)
return result
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])
| apache-2.0 | -6,594,749,808,247,385,000 | 29.491694 | 122 | 0.635759 | false |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/pyasn1_modules/rfc2459.py | 72 | 44828 | #
# X.509 message syntax
#
# ASN.1 source from:
# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
# http://www.ietf.org/rfc/rfc2459.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import tag,namedtype,namedval,univ,constraint,char,useful
MAX = 64 # XXX ?
#
# PKIX1Explicit88
#
# Upper Bounds
ub_name = univ.Integer(32768)
ub_common_name = univ.Integer(64)
ub_locality_name = univ.Integer(128)
ub_state_name = univ.Integer(128)
ub_organization_name = univ.Integer(64)
ub_organizational_unit_name = univ.Integer(64)
ub_title = univ.Integer(64)
ub_match = univ.Integer(128)
ub_emailaddress_length = univ.Integer(128)
ub_common_name_length = univ.Integer(64)
ub_country_name_alpha_length = univ.Integer(2)
ub_country_name_numeric_length = univ.Integer(3)
ub_domain_defined_attributes = univ.Integer(4)
ub_domain_defined_attribute_type_length = univ.Integer(8)
ub_domain_defined_attribute_value_length = univ.Integer(128)
ub_domain_name_length = univ.Integer(16)
ub_extension_attributes = univ.Integer(256)
ub_e163_4_number_length = univ.Integer(15)
ub_e163_4_sub_address_length = univ.Integer(40)
ub_generation_qualifier_length = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
ub_integer_options = univ.Integer(256)
ub_numeric_user_id_length = univ.Integer(32)
ub_organization_name_length = univ.Integer(64)
ub_organizational_unit_name_length = univ.Integer(32)
ub_organizational_units = univ.Integer(4)
ub_pds_name_length = univ.Integer(16)
ub_pds_parameter_length = univ.Integer(30)
ub_pds_physical_address_lines = univ.Integer(6)
ub_postal_code_length = univ.Integer(16)
ub_surname_length = univ.Integer(40)
ub_terminal_id_length = univ.Integer(24)
ub_unformatted_address_length = univ.Integer(180)
ub_x121_address_length = univ.Integer(16)
class UniversalString(char.UniversalString): pass
class BMPString(char.BMPString): pass
class UTF8String(char.UTF8String): pass
id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
class AttributeValue(univ.Any): pass
class AttributeType(univ.ObjectIdentifier): pass
class AttributeTypeAndValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue())
)
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
id_at = univ.ObjectIdentifier('2.5.4')
id_at_name = univ.ObjectIdentifier('2.5.4.41')
# preserve misspelled variable for compatibility
id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
id_at_initials = univ.ObjectIdentifier('2.5.4.43')
id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
class X520name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
)
id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
class X520CommonName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
)
id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
class X520LocalityName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
)
id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
class X520StateOrProvinceName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
)
id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
class X520OrganizationName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
)
id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
class X520OrganizationalUnitName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
)
id_at_title = univ.ObjectIdentifier('2.5.4.12')
class X520Title(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
)
id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
class X520dnQualifier(char.PrintableString): pass
id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
class X520countryName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
class Pkcs9email(char.IA5String):
subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
# ----
class DSAPrivateKey(univ.Sequence):
"""PKIX compliant DSA private key structure"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('public', univ.Integer()),
namedtype.NamedType('private', univ.Integer())
    )
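# Illustrative sketch (not part of the original module): a DER-encoded DSA
# private key can be parsed against the spec above with the pyasn1 decoder,
# assuming `der_bytes` holds the raw encoding:
#
#     from pyasn1.codec.der import decoder
#     key, _rest = decoder.decode(der_bytes, asn1Spec=DSAPrivateKey())
#     p, q, g = key['p'], key['q'], key['g']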
# ----
class RelativeDistinguishedName(univ.SetOf):
componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
componentType = RelativeDistinguishedName()
class Name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('', RDNSequence())
)
class DirectoryString(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
        namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))  # non-standard: ia5String is not part of the X.520/RFC 3280 DirectoryString definition
)
# certificate and CRL specific structures begin here
class AlgorithmIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('parameters', univ.Any())
)
class Extension(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
        namedtype.DefaultedNamedType('critical', univ.Boolean(False)),
namedtype.NamedType('extnValue', univ.Any())
)
class Extensions(univ.SequenceOf):
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectPublicKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
class UniqueIdentifier(univ.BitString): pass
class Time(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class Validity(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class CertificateSerialNumber(univ.Integer): pass
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('v1', 0), ('v2', 1), ('v3', 2)
)
class TBSCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class Certificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', univ.BitString())
)
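# Illustrative sketch (not part of the original module): the Certificate spec
# above is typically used to parse a DER-encoded X.509 certificate, e.g.
# assuming `der_bytes` was read from a *.der file:
#
#     from pyasn1.codec.der import decoder
#     cert, _rest = decoder.decode(der_bytes, asn1Spec=Certificate())
#     tbs = cert['tbsCertificate']
#     serial = tbs['serialNumber']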
# CRL structures
class RevokedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', CertificateSerialNumber()),
namedtype.NamedType('revocationDate', Time()),
namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
)
class TBSCertList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('thisUpdate', Time()),
namedtype.OptionalNamedType('nextUpdate', Time()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class CertificateList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertList', TBSCertList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
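# Illustrative sketch (not part of the original module): a DER-encoded CRL can
# be parsed the same way, assuming `der_bytes` holds the encoding; note that
# 'revokedCertificates' is optional and may be absent:
#
#     from pyasn1.codec.der import decoder
#     crl, _rest = decoder.decode(der_bytes, asn1Spec=CertificateList())
#     revoked = crl['tbsCertList']['revokedCertificates']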
# Algorithm OIDs and parameter structures
pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
class Dss_Sig_Value(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
class ValidationParms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('seed', univ.BitString()),
namedtype.NamedType('pgenCounter', univ.Integer())
)
class DomainParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('j', univ.Integer()),
namedtype.OptionalNamedType('validationParms', ValidationParms())
)
id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
class Dss_Parms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer())
)
# x400 address syntax starts here
teletex_domain_defined_attributes = univ.Integer(6)
class TeletexDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.TeletexString())
)
class TeletexDomainDefinedAttributes(univ.SequenceOf):
componentType = TeletexDomainDefinedAttribute()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
terminal_type = univ.Integer(23)
class TerminalType(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
namedValues = namedval.NamedValues(
('telex', 3),
('teletelex', 4),
('g3-facsimile', 5),
('g4-facsimile', 6),
('ia5-terminal', 7),
('videotex', 8)
)
class PresentationAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3), subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
)
extended_network_address = univ.Integer(22)
class E163_4_address(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtendedNetworkAddress(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('e163-4-address', E163_4_address()),
namedtype.NamedType('psap-address', PresentationAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class PDSParameter(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
)
local_postal_attributes = univ.Integer(21)
class LocalPostalAttributes(PDSParameter): pass
class UniquePostalName(PDSParameter): pass
unique_postal_name = univ.Integer(20)
poste_restante_address = univ.Integer(19)
class PosteRestanteAddress(PDSParameter): pass
post_office_box_address = univ.Integer(18)
class PostOfficeBoxAddress(PDSParameter): pass
street_address = univ.Integer(17)
class StreetAddress(PDSParameter): pass
class UnformattedPostalAddress(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
)
physical_delivery_office_name = univ.Integer(10)
class PhysicalDeliveryOfficeName(PDSParameter): pass
physical_delivery_office_number = univ.Integer(11)
class PhysicalDeliveryOfficeNumber(PDSParameter): pass
extension_OR_address_components = univ.Integer(12)
class ExtensionORAddressComponents(PDSParameter): pass
physical_delivery_personal_name = univ.Integer(13)
class PhysicalDeliveryPersonalName(PDSParameter): pass
physical_delivery_organization_name = univ.Integer(14)
class PhysicalDeliveryOrganizationName(PDSParameter): pass
extension_physical_delivery_address_components = univ.Integer(15)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter): pass
unformatted_postal_address = univ.Integer(16)
postal_code = univ.Integer(9)
class PostalCode(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric-code', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
namedtype.NamedType('printable-code', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
)
class PhysicalDeliveryCountryName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class PDSName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
physical_delivery_country_name = univ.Integer(8)
class TeletexOrganizationalUnitName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
pds_name = univ.Integer(7)
teletex_organizational_unit_names = univ.Integer(5)
class TeletexOrganizationalUnitNames(univ.SequenceOf):
componentType = TeletexOrganizationalUnitName()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
teletex_personal_name = univ.Integer(4)
class TeletexPersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
teletex_organization_name = univ.Integer(3)
class TeletexOrganizationName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
teletex_common_name = univ.Integer(2)
class TeletexCommonName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
class CommonName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
common_name = univ.Integer(1)
class ExtensionAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('extension-attribute-value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtensionAttributes(univ.SetOf):
componentType = ExtensionAttribute()
subtypeSpec = univ.SetOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
class BuiltInDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
componentType = BuiltInDomainDefinedAttribute()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
class OrganizationalUnitName(char.PrintableString):
    subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
componentType = OrganizationalUnitName()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
class PersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length), explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class NumericUserIdentifier(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class OrganizationName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
class PrivateDomainName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
)
class TerminalIdentifier(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
class X121Address(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address): pass
class AdministrationDomainName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
)
class CountryName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class BuiltInStandardAttributes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('country-name', CountryName()),
namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('personal-name', PersonalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
class ORAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
)
#
# PKIX1Implicit88
#
id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
class InvalidityDate(useful.GeneralizedTime): pass
id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
class HoldInstructionCode(univ.ObjectIdentifier): pass
id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8)
)
id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
class CRLNumber(univ.Integer):
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber): pass
id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
class KeyPurposeId(univ.ObjectIdentifier): pass
class ExtKeyUsageSyntax(univ.SequenceOf):
componentType = KeyPurposeId()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class ReasonFlags(univ.BitString):
namedValues = namedval.NamedValues(
('unused', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6)
)
class SkipCerts(univ.Integer):
    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
class PolicyConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
class BasicConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
)
id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
class SubjectDirectoryAttributes(univ.SequenceOf):
componentType = Attribute()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class EDIPartyName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('partyName', DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class GeneralName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName', AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('rfc822Name', char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName', char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('x400Address', ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName', Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.NamedType('ediPartyName', EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.NamedType('uniformResourceIdentifier', char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
)
class GeneralNames(univ.SequenceOf):
componentType = GeneralName()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class AccessDescription(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', GeneralName())
)
class AuthorityInfoAccessSyntax(univ.SequenceOf):
componentType = AccessDescription()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
class DistributionPointName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('fullName', GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class DistributionPoint(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class BaseDistance(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
class CRLDistPointsSyntax(univ.SequenceOf):
componentType = DistributionPoint()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
class IssuingDistributionPoint(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
        namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean(False).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.DefaultedNamedType('indirectCRL', univ.Boolean(False).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)
class GeneralSubtree(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('base', GeneralName()),
namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('maximum', BaseDistance().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class GeneralSubtrees(univ.SequenceOf):
componentType = GeneralSubtree()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
class NameConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class DisplayText(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('visibleString', char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
)
class NoticeReference(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('organization', DisplayText()),
namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
)
class UserNotice(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('noticeRef', NoticeReference()),
namedtype.OptionalNamedType('explicitText', DisplayText())
)
class CPSuri(char.IA5String): pass
class PolicyQualifierId(univ.ObjectIdentifier):
subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
class CertPolicyId(univ.ObjectIdentifier): pass
class PolicyQualifierInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
namedtype.NamedType('qualifier', univ.Any())
)
id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
class PolicyInformation(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('policyIdentifier', CertPolicyId()),
namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class CertificatePolicies(univ.SequenceOf):
componentType = PolicyInformation()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
class PolicyMapping(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
)
class PolicyMappings(univ.SequenceOf):
componentType = PolicyMapping()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
class PrivateKeyUsagePeriod(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
class KeyUsage(univ.BitString):
namedValues = namedval.NamedValues(
('digitalSignature', 0),
('nonRepudiation', 1),
('keyEncipherment', 2),
('dataEncipherment', 3),
('keyAgreement', 4),
('keyCertSign', 5),
('cRLSign', 6),
('encipherOnly', 7),
('decipherOnly', 8)
)
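# Illustrative sketch (not part of the original module): KeyUsage decodes to a
# BIT STRING whose bit positions follow the named values above, so assuming
# `extn_der` holds the DER found inside the extension's extnValue OCTET STRING:
#
#     from pyasn1.codec.der import decoder
#     usage, _rest = decoder.decode(extn_der, asn1Spec=KeyUsage())
#     can_sign_certs = len(usage) > 5 and bool(usage[5])  # bit 5 is 'keyCertSign'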
id_ce = univ.ObjectIdentifier('2.5.29')
id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
class KeyIdentifier(univ.OctetString): pass
id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
class SubjectKeyIdentifier(KeyIdentifier): pass
class AuthorityKeyIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
class CertificateIssuer(GeneralNames): pass
id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
class SubjectAltName(GeneralNames): pass
id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
class IssuerAltName(GeneralNames): pass
| mit | -9,129,836,742,262,980,000 | 48.588496 | 278 | 0.756112 | false |
anorfleet/turntable | test/lib/python2.7/site-packages/scipy/interpolate/tests/test_fitpack2.py | 7 | 18719 | #!/usr/bin/env python
# Created by Pearu Peterson, June 2003
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, assert_raises, TestCase,
run_module_suite)
from numpy import array, diff, linspace, meshgrid, ones, pi, shape
from scipy.interpolate.fitpack import bisplrep, bisplev
from scipy.interpolate.fitpack2 import (UnivariateSpline,
LSQUnivariateSpline, InterpolatedUnivariateSpline,
LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
RectSphereBivariateSpline)
class TestUnivariateSpline(TestCase):
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
arg = [1.5, 2, 2.5]
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
assert_array_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
assert_array_equal(spl([]), array([]))
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=np.float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
assert_allclose(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
assert_allclose(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
class TestLSQBivariateSpline(TestCase):
# NOTE: The systems in this test class are rank-deficient
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_almost_equal(lut(2,2), 3.)
def test_bilinearity(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,7,8,3,4,7,1,3,4]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with warnings.catch_warnings():
# This seems to fail (ier=1, see ticket 1642).
warnings.simplefilter('ignore', UserWarning)
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
for xa, xb in zip(tx[:-1], tx[1:]):
for ya, yb in zip(ty[:-1], ty[1:]):
for t in [0.1, 0.5, 0.9]:
for s in [0.3, 0.4, 0.7]:
xp = xa*(1-t) + xb*t
yp = ya*(1-s) + yb*s
zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ lut(xb, ya)*t*(1-s)
+ lut(xa, yb)*(1-t)*s
+ lut(xb, yb)*t*s)
assert_almost_equal(lut(xp,yp), zp)
def test_integral(self):
x = [1,1,1,2,2,2,8,8,8]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
def test_empty_input(self):
        # Test whether empty inputs return an empty output. Ticket 1014
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_array_equal(lut([], []), np.zeros((0,0)))
assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
class TestSmoothBivariateSpline(TestCase):
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with warnings.catch_warnings():
# This seems to fail (ier=1, see ticket 1642).
warnings.simplefilter('ignore', UserWarning)
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
class TestLSQSphereBivariateSpline(TestCase):
def setUp(self):
# define the input data and coordinates
ntheta, nphi = 70, 90
theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
knotdata = data[::5, ::5]
# calculate spline coefficients
lats, lons = meshgrid(theta, phi)
lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
self.lut_lsq = lut_lsq
self.data = knotdata
self.new_lons, self.new_lats = knotsp, knotst
def test_linear_constant(self):
assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
self.data)
def test_empty_input(self):
assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
class TestSmoothSphereBivariateSpline(TestCase):
def setUp(self):
theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
.75*pi, .75*pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
def test_linear_constant(self):
assert_almost_equal(self.lut.get_residual(), 0.)
assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
[[3, 3], [3, 3], [3, 3]])
def test_empty_input(self):
assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
class TestRectBivariateSpline(TestCase):
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
class TestRectSphereBivariateSpline(TestCase):
def test_defaults(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_derivatives(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
assert_allclose(lut(x, y, dtheta=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
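# _numdiff_2d approximates partial derivatives of func(x, y) by central
# differences, e.g. d/dx f ~ (f(x + eps, y) - f(x - eps, y)) / (2*eps); the
# mixed derivative uses the corresponding four-point stencil over (2*eps)**2.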
def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
if dx == 0 and dy == 0:
return func(x, y)
elif dx == 1 and dy == 0:
return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
elif dx == 0 and dy == 1:
return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
elif dx == 1 and dy == 1:
return (func(x + eps, y + eps) - func(x - eps, y + eps)
- func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
else:
raise ValueError("invalid derivative order")
if __name__ == "__main__":
run_module_suite()
| mit | 7,027,878,597,349,935,000 | 39.429806 | 98 | 0.511459 | false |
B-MOOC/edx-platform | cms/djangoapps/contentstore/views/certificates.py | 3 | 19144 | """
Certificates Data Model:
course.certificates: {
'certificates': [
{
'version': 1, // data contract version
'id': 12345, // autogenerated identifier
'name': 'Certificate 1',
'description': 'Certificate 1 Description',
'course_title': 'course title',
'signatories': [
{
'id': 24680, // autogenerated identifier
'name': 'Dr. Bob Smith',
'title': 'Dean of the College',
'organization': 'Awesome College'
}
]
}
]
}
"""
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey, AssetKey
from student.auth import has_studio_write_access
from util.db import generate_int_id, MYSQL_MAX_INT
from util.json_request import JsonResponse
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from contentstore.views.assets import delete_asset
from contentstore.views.exception import AssetNotFoundException
from django.core.exceptions import PermissionDenied
from course_modes.models import CourseMode
from contentstore.utils import get_lms_link_for_certificate_web_view
CERTIFICATE_SCHEMA_VERSION = 1
CERTIFICATE_MINIMUM_ID = 100
def _get_course_and_check_access(course_key, user, depth=0):
"""
Internal method used to calculate and return the locator and
course module for the view functions in this file.
"""
if not has_studio_write_access(user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key, depth=depth)
return course_module
def _delete_asset(course_key, asset_key_string):
"""
Internal method used to create asset key from string and
remove asset by calling delete_asset method of assets module.
"""
if asset_key_string:
# remove first slash in asset path
# otherwise it generates InvalidKeyError in case of split modulestore
if '/' == asset_key_string[0]:
asset_key_string = asset_key_string[1:]
asset_key = AssetKey.from_string(asset_key_string)
try:
delete_asset(course_key, asset_key)
# If the asset was not found, it doesn't have to be deleted...
except AssetNotFoundException:
pass
# Certificates Exceptions
class CertificateException(Exception):
"""
Base exception for Certificates workflows
"""
pass
class CertificateValidationError(CertificateException):
"""
An exception raised when certificate information is invalid.
"""
pass
class CertificateManager(object):
"""
The CertificateManager is responsible for storage, retrieval, and manipulation of Certificates
Certificates are not stored in the Django ORM, they are a field/setting on the course descriptor
"""
@staticmethod
def parse(json_string):
"""
Deserialize the provided JSON data into a standard Python object
"""
try:
certificate = json.loads(json_string)
except ValueError:
raise CertificateValidationError(_("invalid JSON"))
# Include the data contract version
certificate["version"] = CERTIFICATE_SCHEMA_VERSION
# Ensure a signatories list is always returned
if certificate.get("signatories") is None:
certificate["signatories"] = []
certificate["editing"] = False
return certificate
@staticmethod
def validate(certificate_data):
"""
Ensure the certificate data contains all of the necessary fields and the values match our rules
"""
# Ensure the schema version meets our expectations
if certificate_data.get("version") != CERTIFICATE_SCHEMA_VERSION:
raise TypeError(
"Unsupported certificate schema version: {0}. Expected version: {1}.".format(
certificate_data.get("version"),
CERTIFICATE_SCHEMA_VERSION
)
)
if not certificate_data.get("name"):
raise CertificateValidationError(_("must have name of the certificate"))
@staticmethod
def get_used_ids(course):
"""
Return a list of certificate identifiers that are already in use for this course
"""
if not course.certificates or not course.certificates.get('certificates'):
return []
return [cert['id'] for cert in course.certificates['certificates']]
@staticmethod
def assign_id(course, certificate_data, certificate_id=None):
"""
Assign an identifier to the provided certificate data.
If the caller did not provide an identifier, we autogenerate a unique one for them
In addition, we check the certificate's signatories and ensure they also have unique ids
"""
used_ids = CertificateManager.get_used_ids(course)
if certificate_id:
certificate_data['id'] = int(certificate_id)
else:
certificate_data['id'] = generate_int_id(
CERTIFICATE_MINIMUM_ID,
MYSQL_MAX_INT,
used_ids
)
for index, signatory in enumerate(certificate_data['signatories']): # pylint: disable=unused-variable
if signatory and not signatory.get('id', False):
signatory['id'] = generate_int_id(used_ids=used_ids)
used_ids.append(signatory['id'])
return certificate_data
@staticmethod
def serialize_certificate(certificate):
"""
Serialize the Certificate object's locally-stored certificate data to a JSON representation
We use direct access here for specific keys in order to enforce their presence
"""
certificate_data = certificate.certificate_data
certificate_response = {
"id": certificate_data['id'],
"name": certificate_data['name'],
"description": certificate_data['description'],
"version": CERTIFICATE_SCHEMA_VERSION,
"org_logo_path": certificate_data.get('org_logo_path', ''),
"signatories": certificate_data['signatories']
}
# Some keys are not required, such as the title override...
if certificate_data.get('course_title'):
certificate_response["course_title"] = certificate_data['course_title']
return certificate_response
@staticmethod
def deserialize_certificate(course, value):
"""
Deserialize from a JSON representation into a Certificate object.
        'value' should be a valid JSON string representation of the certificate
"""
# Ensure the schema fieldset meets our expectations
for key in ("name", "description", "version"):
if key not in value:
raise CertificateValidationError(_("Certificate dict {0} missing value key '{1}'").format(value, key))
# Load up the Certificate data
certificate_data = CertificateManager.parse(value)
CertificateManager.validate(certificate_data)
certificate_data = CertificateManager.assign_id(course, certificate_data, certificate_data.get('id', None))
certificate = Certificate(course, certificate_data)
# Return a new Certificate object instance
return certificate
@staticmethod
def get_certificates(course):
"""
Retrieve the certificates list from the provided course
"""
# The top-level course field is 'certificates', which contains various properties,
# including the actual 'certificates' list that we're working with in this context
return course.certificates.get('certificates', [])
@staticmethod
def remove_certificate(request, store, course, certificate_id):
"""
Remove certificate from the course
"""
for index, cert in enumerate(course.certificates['certificates']):
if int(cert['id']) == int(certificate_id):
certificate = course.certificates['certificates'][index]
# Remove any signatory assets prior to dropping the entire cert record from the course
_delete_asset(course.id, certificate['org_logo_path'])
for sig_index, signatory in enumerate(certificate.get('signatories')): # pylint: disable=unused-variable
_delete_asset(course.id, signatory['signature_image_path'])
# Now drop the certificate record
course.certificates['certificates'].pop(index)
store.update_item(course, request.user.id)
break
    # pylint: disable=unused-variable
@staticmethod
def remove_signatory(request, store, course, certificate_id, signatory_id):
"""
Remove the specified signatory from the provided course certificate
"""
for cert_index, cert in enumerate(course.certificates['certificates']): # pylint: disable=unused-variable
if int(cert['id']) == int(certificate_id):
for sig_index, signatory in enumerate(cert.get('signatories')): # pylint: disable=unused-variable
if int(signatory_id) == int(signatory['id']):
_delete_asset(course.id, signatory['signature_image_path'])
del cert['signatories'][sig_index]
store.update_item(course, request.user.id)
break
class Certificate(object):
"""
The logical representation of an individual course certificate
"""
def __init__(self, course, certificate_data):
"""
Instantiate a Certificate object instance using the provided information.
"""
self.course = course
self._certificate_data = certificate_data
self.id = certificate_data['id'] # pylint: disable=invalid-name
@property
def certificate_data(self):
"""
Retrieve the locally-stored certificate data from the Certificate object via a helper method
"""
return self._certificate_data
@login_required
@require_http_methods(("POST",))
@ensure_csrf_cookie
def certificate_activation_handler(request, course_key_string):
"""
A handler for Certificate Activation/Deactivation
POST
        json: is_active. Update the activation state of the certificate.
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
try:
course = _get_course_and_check_access(course_key, request.user)
except PermissionDenied:
msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user)
return JsonResponse({"error": msg}, status=403)
data = json.loads(request.body)
is_active = data.get('is_active', False)
certificates = CertificateManager.get_certificates(course)
    # For certificate activation/deactivation, we assume a single certificate in the certificates collection.
for certificate in certificates:
certificate['is_active'] = is_active
break
store.update_item(course, request.user.id)
return HttpResponse(status=200)
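# Illustrative request for the handler above (URL and course id are hypothetical):
#
#   POST /certificates/activation/course-v1:OrgX+CS101+2015T1
#   Content-Type: application/json
#
#   {"is_active": true}
#
# A 200 response means the course's certificate configuration had its
# 'is_active' flag set to the submitted value.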
@login_required
@require_http_methods(("GET", "POST"))
@ensure_csrf_cookie
def certificates_list_handler(request, course_key_string):
"""
A RESTful handler for Course Certificates
GET
html: return Certificates list page (Backbone application)
POST
json: create new Certificate
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
try:
course = _get_course_and_check_access(course_key, request.user)
except PermissionDenied:
msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user)
return JsonResponse({"error": msg}, status=403)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
certificate_url = reverse_course_url('certificates.certificates_list_handler', course_key)
course_outline_url = reverse_course_url('course_handler', course_key)
upload_asset_url = reverse_course_url('assets_handler', course_key)
activation_handler_url = reverse_course_url(
handler_name='certificates.certificate_activation_handler',
course_key=course_key
)
course_modes = [mode.slug for mode in CourseMode.modes_for_course(course.id)]
certificate_web_view_url = get_lms_link_for_certificate_web_view(
user_id=request.user.id,
course_key=course_key,
                mode=course_modes[0]  # CourseMode.modes_for_course returns the default 'honor' mode if none is found.
)
certificates = None
is_active = False
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
certificates = CertificateManager.get_certificates(course)
# we are assuming only one certificate in certificates collection.
for certificate in certificates:
is_active = certificate.get('is_active', False)
break
return render_to_response('certificates.html', {
'context_course': course,
'certificate_url': certificate_url,
'course_outline_url': course_outline_url,
'upload_asset_url': upload_asset_url,
'certificates': json.dumps(certificates),
'course_modes': course_modes,
'certificate_web_view_url': certificate_web_view_url,
'is_active': is_active,
'certificate_activation_handler_url': activation_handler_url
})
elif "application/json" in request.META.get('HTTP_ACCEPT'):
# Retrieve the list of certificates for the specified course
if request.method == 'GET':
certificates = CertificateManager.get_certificates(course)
return JsonResponse(certificates, encoder=EdxJSONEncoder)
elif request.method == 'POST':
# Add a new certificate to the specified course
try:
new_certificate = CertificateManager.deserialize_certificate(course, request.body)
except CertificateValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if course.certificates.get('certificates') is None:
course.certificates['certificates'] = []
course.certificates['certificates'].append(new_certificate.certificate_data)
response = JsonResponse(CertificateManager.serialize_certificate(new_certificate), status=201)
response["Location"] = reverse_course_url(
'certificates.certificates_detail_handler',
course.id,
kwargs={'certificate_id': new_certificate.id} # pylint: disable=no-member
)
store.update_item(course, request.user.id)
course = _get_course_and_check_access(course_key, request.user)
return response
else:
return HttpResponse(status=406)
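# Illustrative only: a POST to the handler above with an "application/json"
# Accept header and a body such as
#   {"name": "Honor Certificate", "description": "...", "version": 1}
# creates a certificate and returns 201 with a Location header pointing at
# certificates_detail_handler for the newly assigned id. The field values
# shown are hypothetical.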
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def certificates_detail_handler(request, course_key_string, certificate_id):
"""
JSON API endpoint for manipulating a course certificate via its internal identifier.
Utilized by the Backbone.js 'certificates' application model
POST or PUT
json: update the specified certificate based on provided information
DELETE
json: remove the specified certificate from the course
"""
course_key = CourseKey.from_string(course_key_string)
course = _get_course_and_check_access(course_key, request.user)
certificates_list = course.certificates.get('certificates', [])
match_index = None
match_cert = None
for index, cert in enumerate(certificates_list):
if certificate_id is not None:
if int(cert['id']) == int(certificate_id):
match_index = index
match_cert = cert
store = modulestore()
if request.method in ('POST', 'PUT'):
try:
new_certificate = CertificateManager.deserialize_certificate(course, request.body)
except CertificateValidationError as err:
return JsonResponse({"error": err.message}, status=400)
serialized_certificate = CertificateManager.serialize_certificate(new_certificate)
if match_cert:
certificates_list[match_index] = serialized_certificate
else:
certificates_list.append(serialized_certificate)
store.update_item(course, request.user.id)
return JsonResponse(serialized_certificate, status=201)
elif request.method == "DELETE":
if not match_cert:
return JsonResponse(status=404)
CertificateManager.remove_certificate(
request=request,
store=store,
course=course,
certificate_id=certificate_id
)
return JsonResponse(status=204)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def signatory_detail_handler(request, course_key_string, certificate_id, signatory_id):
"""
JSON API endpoint for manipulating a specific course certificate signatory via its internal identifier.
Utilized by the Backbone 'certificates' application.
DELETE
json: Remove the specified signatory from the specified certificate
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = _get_course_and_check_access(course_key, request.user)
certificates_list = course.certificates['certificates']
match_cert = None
# pylint: disable=unused-variable
for index, cert in enumerate(certificates_list):
if certificate_id is not None:
if int(cert['id']) == int(certificate_id):
match_cert = cert
if request.method == "DELETE":
if not match_cert:
return JsonResponse(status=404)
CertificateManager.remove_signatory(
request=request,
store=store,
course=course,
certificate_id=certificate_id,
signatory_id=signatory_id
)
return JsonResponse(status=204)
| agpl-3.0 | 3,251,929,802,354,216,400 | 39.645435 | 121 | 0.637746 | false |
audiolion/django-shibauth-rit | tests/test_backends.py | 1 | 6303 | # -*- coding: utf-8 -*-
# Third Party Library Imports
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.test import RequestFactory, TestCase, override_settings
# First Party Library Imports
from shibauth_rit.compat import reverse
from shibauth_rit.middleware import ShibauthRitMiddleware
try:
from importlib import reload # python 3.4+
except ImportError:
try:
from imp import reload # for python 3.2/3.3
except ImportError:
pass # this means we're on python 2, where reload is a builtin function
settings.SHIBAUTH_ATTRIBUTE_MAP = {
"idp": (False, "idp"),
"mail": (False, "email"),
"uid": (True, "username"),
"schoolStatus": (False, "status"),
"affiliation": (False, "affiliation"),
"sessionId": (False, "session_id"),
"givenName": (False, "first_name"),
"sn": (False, "last_name"),
}
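# Each entry above maps an incoming Shibboleth header to a (required, user_field)
# pair. Illustrative only: with this map, a "uid" header of "rrcdis1" becomes
# User.username and a "mail" header of "[email protected]" becomes User.email,
# which is what the sample headers used in the tests below assume.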
settings.AUTHENTICATION_BACKENDS += (
'shibauth_rit.backends.ShibauthRitBackend',
)
settings.MIDDLEWARE_CLASSES += (
'shibauth_rit.middleware.ShibauthRitMiddleware',
)
settings.ROOT_URLCONF = 'tests.urls'
# we import the module so we can reload with new settings for tests
from shibauth_rit import backends # noqa; E402
class TestAttributes(TestCase):
def test_decorator_not_authenticated(self):
res = self.client.get(reverse('shibauth_rit:shibauth_info'))
self.assertEqual(res.status_code, 302)
# Test the context - shouldn't exist
self.assertEqual(res.context, None)
def test_decorator_authenticated(self):
res = self.client.get(reverse('shibauth_rit:shibauth_info'), **settings.SAMPLE_HEADERS)
self.assertEqual(str(res.context['user']), 'rrcdis1')
self.assertEqual(res.status_code, 200)
user = res.context.get('user')
self.assertEqual(user.email, '[email protected]')
self.assertEqual(user.first_name, 'Sample')
self.assertEqual(user.last_name, 'Developer')
self.assertTrue(user.is_authenticated())
self.assertFalse(user.is_anonymous())
@override_settings(SHIBAUTH_ATTRIBUTE_MAP={'uid': (True, 'username')})
def test_no_non_required_kwargs(self):
res = self.client.get(reverse('shibauth_rit:shibauth_info'), **settings.SAMPLE_HEADERS)
self.assertEqual(str(res.context['user']), 'rrcdis1')
self.assertEqual(res.status_code, 200)
class TestShibauthRitBackend(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
def _get_valid_shib_meta(self, location='/'):
request_factory = RequestFactory()
test_request = request_factory.get(location)
test_request.META.update(**settings.SAMPLE_HEADERS)
shib_meta, error = ShibauthRitMiddleware.parse_attributes(test_request)
self.assertFalse(error, 'Generating shibboleth attribute mapping contains errors')
return shib_meta
def test_create_unknown_user_true(self):
self.assertFalse(User.objects.all())
shib_meta = self._get_valid_shib_meta(location=reverse('shibauth_rit:shibauth_info'))
user = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
self.assertEqual(user.username, '[email protected]')
self.assertEqual(User.objects.all()[0].username, '[email protected]')
def test_create_unknown_user_false(self):
with self.settings(SHIBAUTH_CREATE_UNKNOWN_USER=False):
# because attr is set on the class we need to reload the module
reload(backends)
shib_meta = self._get_valid_shib_meta(location=reverse('shibauth_rit:shibauth_info'))
self.assertEqual(User.objects.all().count(), 0)
user = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
self.assertTrue(user is None)
self.assertEqual(User.objects.all().count(), 0)
# reload module again to remove the setting override
reload(backends)
def test_ensure_user_attributes(self):
shib_meta = self._get_valid_shib_meta(location=reverse('shibauth_rit:shibauth_info'))
# Create / authenticate the test user and store another mail address
user = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
user.email = '[email protected]'
user.save()
# The user must contain the invalid mail address
user = User.objects.get(username='[email protected]')
self.assertEqual(user.email, '[email protected]')
# After authenticate the user again, the mail address must be set back to the shibboleth data
user2 = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
self.assertEqual(user2.email, '[email protected]')
def test_change_required_attributes(self):
shib_meta = self._get_valid_shib_meta(location=reverse('shibauth_rit:shibauth_info'))
user = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
user.username = 'new_user'
user.save()
user = auth.authenticate(remote_user='[email protected]', shib_meta=shib_meta)
self.assertEqual(user.email, '[email protected]')
class LogoutTest(TestCase):
def test_logout(self):
# Login
login = self.client.get(reverse('shibauth_rit:shibauth_login'), **settings.SAMPLE_HEADERS)
self.assertEqual(login.status_code, 302)
# Logout
logout = self.client.get(reverse('shibauth_rit:shibauth_logout'), **settings.SAMPLE_HEADERS)
self.assertEqual(logout.status_code, 302)
# Ensure redirect happened.
self.assertEqual(
logout['Location'],
'https://shibboleth.main.ad.rit.edu/logout.html'
)
# Check to see if the session has the force logout key.
self.assertTrue(self.client.session.get(settings.SHIBAUTH_LOGOUT_SESSION_KEY))
# Load root url to see if user is in fact logged out.
resp = self.client.get(reverse('shibauth_rit:shibauth_info'), **settings.SAMPLE_HEADERS)
self.assertEqual(resp.status_code, 302)
# Make sure the context is empty.
self.assertEqual(resp.context, None)
| mit | -352,922,252,984,526,800 | 41.877551 | 101 | 0.680946 | false |
bd339/servo | tests/dromaeo/run_dromaeo.py | 111 | 2507 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import urlparse
import json
# Port to run the HTTP server on for Dromaeo.
TEST_SERVER_PORT = 8192
# Run servo and print / parse the results for a specific Dromaeo module.
def run_servo(servo_exe, tests):
url = "http://localhost:{0}/dromaeo/web/index.html?{1}&automated&post_json".format(TEST_SERVER_PORT, tests)
args = [servo_exe, url, "-z", "-f"]
return subprocess.Popen(args)
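# Illustrative only: with TEST_SERVER_PORT = 8192 and tests = "dom-attr" (a
# hypothetical module name), run_servo() builds the URL
#   http://localhost:8192/dromaeo/web/index.html?dom-attr&automated&post_json
# and launches the servo binary against it with the "-z" and "-f" flags used above.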
# Print usage if command line args are incorrect
def print_usage():
print("USAGE: {0} tests servo_binary dromaeo_base_dir".format(sys.argv[0]))
# Handle the POST at the end
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_POST(self):
self.send_response(200)
self.end_headers()
self.wfile.write("<HTML>POST OK.<BR><BR>")
length = int(self.headers.getheader('content-length'))
parameters = urlparse.parse_qs(self.rfile.read(length))
self.server.got_post = True
self.server.post_data = parameters['data']
def log_message(self, format, *args):
return
if __name__ == '__main__':
if len(sys.argv) == 4:
tests = sys.argv[1]
servo_exe = sys.argv[2]
base_dir = sys.argv[3]
os.chdir(base_dir)
# Ensure servo binary can be found
if not os.path.isfile(servo_exe):
print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
sys.exit(1)
# Start the test server
server = BaseHTTPServer.HTTPServer(('', TEST_SERVER_PORT), RequestHandler)
print("Testing Dromaeo on Servo!")
proc = run_servo(servo_exe, tests)
server.got_post = False
while not server.got_post:
server.handle_request()
data = json.loads(server.post_data[0])
n = 0
l = 0
for test in data:
n = max(n, len(data[test]))
l = max(l, len(test))
print("\n Test{0} | Time".format(" " * (l - len("Test"))))
print("-{0}-|-{1}-".format("-" * l, "-" * n))
for test in data:
print(" {0}{1} | {2}".format(test, " " * (l - len(test)), data[test]))
proc.kill()
else:
print_usage()
| mpl-2.0 | -6,130,129,712,141,503,000 | 30.734177 | 111 | 0.609493 | false |
charlesccychen/beam | sdks/python/apache_beam/utils/retry_test.py | 5 | 7723 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the retry module."""
from __future__ import absolute_import
import unittest
from builtins import object
from apache_beam.utils import retry
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
# TODO(sourabhbajaj): Remove the GCP specific error code to a submodule
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
class FakeClock(object):
"""A fake clock object implementing sleep() and recording calls."""
def __init__(self):
self.calls = []
def sleep(self, value):
self.calls.append(value)
class FakeLogger(object):
"""A fake logger object implementing log() and recording calls."""
def __init__(self):
self.calls = []
def log(self, message, interval, func_name, exn_name, exn_traceback):
_ = interval, exn_traceback
self.calls.append((message, func_name, exn_name))
@retry.with_exponential_backoff(clock=FakeClock())
def test_function(a, b):
_ = a, b
raise NotImplementedError
@retry.with_exponential_backoff(initial_delay_secs=0.1, num_retries=1)
def test_function_with_real_clock(a, b):
_ = a, b
raise NotImplementedError
@retry.no_retries
def test_no_retry_function(a, b):
_ = a, b
raise NotImplementedError
class RetryTest(unittest.TestCase):
def setUp(self):
self.clock = FakeClock()
self.logger = FakeLogger()
self.calls = 0
def permanent_failure(self, a, b):
raise NotImplementedError
def transient_failure(self, a, b):
self.calls += 1
if self.calls > 4:
return a + b
raise NotImplementedError
def http_error(self, code):
if HttpError is None:
raise RuntimeError("This is not a valid test as GCP is not enabled")
raise HttpError({'status': str(code)}, '', '')
def test_with_explicit_decorator(self):
# We pass one argument as positional argument and one as keyword argument
# so that we cover both code paths for argument handling.
self.assertRaises(NotImplementedError, test_function, 10, b=20)
def test_with_no_retry_decorator(self):
self.assertRaises(NotImplementedError, test_no_retry_function, 1, 2)
def test_with_real_clock(self):
self.assertRaises(NotImplementedError,
test_function_with_real_clock, 10, b=20)
def test_with_default_number_of_retries(self):
self.assertRaises(NotImplementedError,
retry.with_exponential_backoff(clock=self.clock)(
self.permanent_failure),
10, b=20)
self.assertEqual(len(self.clock.calls), 7)
def test_with_explicit_number_of_retries(self):
self.assertRaises(NotImplementedError,
retry.with_exponential_backoff(
clock=self.clock, num_retries=10)(
self.permanent_failure),
10, b=20)
self.assertEqual(len(self.clock.calls), 10)
@unittest.skipIf(HttpError is None, 'google-apitools is not installed')
def test_with_http_error_that_should_not_be_retried(self):
self.assertRaises(HttpError,
retry.with_exponential_backoff(
clock=self.clock, num_retries=10)(
self.http_error),
404)
# Make sure just one call was made.
self.assertEqual(len(self.clock.calls), 0)
@unittest.skipIf(HttpError is None, 'google-apitools is not installed')
def test_with_http_error_that_should_be_retried(self):
self.assertRaises(HttpError,
retry.with_exponential_backoff(
clock=self.clock, num_retries=10)(
self.http_error),
500)
self.assertEqual(len(self.clock.calls), 10)
def test_with_explicit_initial_delay(self):
self.assertRaises(NotImplementedError,
retry.with_exponential_backoff(
initial_delay_secs=10.0, clock=self.clock,
fuzz=False)(
self.permanent_failure),
10, b=20)
self.assertEqual(len(self.clock.calls), 7)
self.assertEqual(self.clock.calls[0], 10.0)
def test_log_calls_for_permanent_failure(self):
self.assertRaises(NotImplementedError,
retry.with_exponential_backoff(
clock=self.clock, logger=self.logger.log)(
self.permanent_failure),
10, b=20)
self.assertEqual(len(self.logger.calls), 7)
for message, func_name, exn_name in self.logger.calls:
self.assertTrue(message.startswith('Retry with exponential backoff:'))
self.assertEqual(exn_name, 'NotImplementedError\n')
self.assertEqual(func_name, 'permanent_failure')
def test_log_calls_for_transient_failure(self):
result = retry.with_exponential_backoff(
clock=self.clock, logger=self.logger.log, fuzz=False)(
self.transient_failure)(10, b=20)
self.assertEqual(result, 30)
self.assertEqual(len(self.clock.calls), 4)
self.assertEqual(self.clock.calls,
[5.0 * 1, 5.0 * 2, 5.0 * 4, 5.0 * 8,])
self.assertEqual(len(self.logger.calls), 4)
for message, func_name, exn_name in self.logger.calls:
self.assertTrue(message.startswith('Retry with exponential backoff:'))
self.assertEqual(exn_name, 'NotImplementedError\n')
self.assertEqual(func_name, 'transient_failure')
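# Minimal usage sketch (the decorated function name is hypothetical; the delays
# assume the defaults exercised above with fuzz disabled, i.e. a 5 second
# initial delay that doubles on each retry):
#
#   @retry.with_exponential_backoff(num_retries=4, fuzz=False)
#   def fetch_shard():
#       ...  # transient errors raised here are retried after ~5s, 10s, 20s, 40s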
class DummyClass(object):
def __init__(self, results):
self.index = 0
self.results = results
@retry.with_exponential_backoff(num_retries=2, initial_delay_secs=0.1)
def func(self):
self.index += 1
if self.index > len(self.results) or \
self.results[self.index - 1] == "Error":
raise ValueError("Error")
return self.results[self.index - 1]
class RetryStateTest(unittest.TestCase):
"""The test_two_failures and test_single_failure would fail if we have
any shared state for the retry decorator. This test tries to prevent a bug we
found where the state in the decorator was shared across objects and retries
were not available correctly.
The test_call_two_objects would test this inside the same test.
"""
def test_two_failures(self):
dummy = DummyClass(["Error", "Error", "Success"])
dummy.func()
self.assertEqual(3, dummy.index)
def test_single_failure(self):
dummy = DummyClass(["Error", "Success"])
dummy.func()
self.assertEqual(2, dummy.index)
def test_call_two_objects(self):
dummy = DummyClass(["Error", "Error", "Success"])
dummy.func()
self.assertEqual(3, dummy.index)
dummy2 = DummyClass(["Error", "Success"])
dummy2.func()
self.assertEqual(2, dummy2.index)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,037,875,279,751,662,000 | 33.632287 | 79 | 0.658811 | false |
vebin/Wox | PythonHome/Lib/site-packages/setuptools/command/bdist_egg.py | 155 | 17606 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
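# Illustrative examples, traced from the logic above:
#   strip_module('foomodule.so') -> 'foo'
#   strip_module('bar.pyd')      -> 'bar'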
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base, name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3] == "2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, basestring):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
# avoid 2.3 zipimport bug when 64 bits
compress = (sys.version >= "2.4")
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
| mit | -2,876,553,395,860,691,500 | 35.755741 | 79 | 0.572589 | false |
40223202/test | static/Brython3.1.3-20150514-095342/Lib/textwrap.py | 745 | 16488 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chucks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
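# Illustrative example:
#   wrap("The quick brown fox jumps over the lazy dog", width=15)
#   -> ['The quick brown', 'fox jumps over', 'the lazy dog']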
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
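# Illustrative example: the common leading whitespace (4 spaces) is removed,
# while the extra indentation on the second line is kept:
#   dedent("    hello\n      world\n") -> "hello\n  world\n"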
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
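# Illustrative example: the default predicate skips blank lines, so only the
# non-blank lines gain the prefix:
#   indent("hello\n\nworld\n", "  ") -> "  hello\n\n  world\n"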
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
| agpl-3.0 | 8,402,692,835,381,830,000 | 39.116788 | 80 | 0.594554 | false |
ESS-LLP/erpnext | erpnext/healthcare/doctype/fee_validity/fee_validity.py | 13 | 1166 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
import frappe
from frappe.utils import getdate
import datetime
class FeeValidity(Document):
pass
def update_fee_validity(fee_validity, date, ref_invoice=None):
max_visit = frappe.db.get_value("Healthcare Settings", None, "max_visit")
valid_days = frappe.db.get_value("Healthcare Settings", None, "valid_days")
if not valid_days:
valid_days = 1
if not max_visit:
max_visit = 1
date = getdate(date)
valid_till = date + datetime.timedelta(days=int(valid_days))
fee_validity.max_visit = max_visit
fee_validity.visited = 1
fee_validity.valid_till = valid_till
fee_validity.ref_invoice = ref_invoice
fee_validity.save(ignore_permissions=True)
return fee_validity
def create_fee_validity(practitioner, patient, date, ref_invoice=None):
fee_validity = frappe.new_doc("Fee Validity")
fee_validity.practitioner = practitioner
fee_validity.patient = patient
fee_validity = update_fee_validity(fee_validity, date, ref_invoice)
return fee_validity
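# Minimal usage sketch (practitioner and patient ids are hypothetical):
#   fee_validity = create_fee_validity("Dr Smith", "PID-0001", "2018-01-01")
# With valid_days = 7 in Healthcare Settings, valid_till is set to 2018-01-08;
# max_visit comes from the same settings and visited starts at 1.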
| gpl-3.0 | -5,365,662,938,190,794,000 | 31.388889 | 76 | 0.755575 | false |
anyonedev/anyonedev-monitor-agent | monitor/metrics/Cpu.py | 1 | 1893 | '''
Created on 2014-11-12
@author: hongye
'''
import psutil
from core import regist_monitor_source
from core.MetricValue import MultiMetricValue, SingleMetricValue
from core.MonitorSource import SampleMonitorSource
class CpuTimesMonitorSource(SampleMonitorSource):
def sample(self, parms):
cpu = psutil.cpu_times()
metricValue = MultiMetricValue(self.getMonitorSourceName())
metricValue.addMetricValue("user", cpu.user)
metricValue.addMetricValue("system", cpu.system)
metricValue.addMetricValue("idle", cpu.idle)
return metricValue
class CpuPercentMonitorSource(SampleMonitorSource):
interval = None
def sample(self, parms):
value = psutil.cpu_percent(self.interval)
return SingleMetricValue(self.getMonitorSourceName(), "cpu_percent", value)
class CpuTimesPercentMonitorSource(SampleMonitorSource):
_interval = None
def sample(self, parms):
cpu = psutil.cpu_times_percent(self._interval)
metricValue = MultiMetricValue(self.getMonitorSourceName())
metricValue.addMetricValue("user", cpu.user)
metricValue.addMetricValue("system", cpu.system)
metricValue.addMetricValue("idle", cpu.idle)
return metricValue
def interval(self, interval):
self._interval = interval
def cpu_percent(monitorSourceName):
monitorSource = CpuPercentMonitorSource().monitorSourceName(monitorSourceName)
regist_monitor_source(monitorSource)
return monitorSource
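# Minimal usage sketch (the monitor source name "cpu.percent" is hypothetical):
#   source = cpu_percent("cpu.percent")   # registers and returns the source
#   value = source.sample(None)           # SingleMetricValue from psutil.cpu_percent()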
def cpu_times_percent(monitorSourceName):
monitorSource = CpuTimesPercentMonitorSource().monitorSourceName(monitorSourceName)
regist_monitor_source(monitorSource)
return monitorSource
def cpu_times(monitorSourceName):
monitorSource = CpuTimesMonitorSource().monitorSourceName(monitorSourceName)
regist_monitor_source(monitorSource)
return monitorSource
| gpl-2.0 | -6,940,471,429,278,517,000 | 33.418182 | 87 | 0.744849 | false |
lzw120/django | build/lib/django/utils/datastructures.py | 12 | 16025 | import copy
from types import GeneratorType
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
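    # Illustrative example (values are hypothetical):
    #   md = MergeDict({'a': 1}, {'a': 2, 'b': 3})
    #   md['a']  -> 1   (the first dictionary wins)
    #   md['b']  -> 3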
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
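    # Illustrative example:
    #   sd = SortedDict()
    #   sd['b'] = 1
    #   sd['a'] = 2
    #   sd.keys()  -> ['b', 'a']   (insertion order, not sort order)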
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice. Once
# to get the data into self with our super().__init__ call and a
# second time to setup keyOrder correctly
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.iteritems()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
return default_list
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return dict((key, self[key]) for key in self)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
| bsd-3-clause | 8,165,227,595,859,711,000 | 30.298828 | 131 | 0.54727 | false |
windyuuy/opera | chromium/src/third_party/python_26/Lib/test/test_xrange.py | 53 | 2512 | # Python test set -- built-in functions
import test.test_support, unittest
import sys
import pickle
import warnings
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
class XrangeTest(unittest.TestCase):
def test_xrange(self):
self.assertEqual(list(xrange(3)), [0, 1, 2])
self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(xrange(0)), [])
self.assertEqual(list(xrange(-3)), [])
self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(xrange(a, a+2)), [a, a+1])
self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])
seq = list(xrange(a, b, c))
self.assert_(a in seq)
self.assert_(b not in seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(b, a, -c))
self.assert_(b in seq)
self.assert_(a not in seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(-a, -b, -c))
self.assert_(-a in seq)
self.assert_(-b not in seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, xrange)
self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
self.assertRaises(ValueError, xrange, 1, 2, 0)
self.assertRaises(OverflowError, xrange, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, xrange, 0, "spam")
self.assertRaises(TypeError, xrange, 0, 42, "spam")
self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)
self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
r = xrange(-sys.maxint, sys.maxint, 2)
self.assertEqual(len(r), sys.maxint)
self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
r = xrange(*t)
self.assertEquals(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
def test_main():
test.test_support.run_unittest(XrangeTest)
if __name__ == "__main__":
test_main()
| bsd-3-clause | -1,239,069,336,259,808,800 | 32.493333 | 78 | 0.567675 | false |
jart/tensorflow | tensorflow/contrib/kfac/examples/tests/convnet_test.py | 14 | 6264 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convnet.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.kfac import layer_collection as lc
from tensorflow.contrib.kfac.examples import convnet
class ConvNetTest(tf.test.TestCase):
def testConvLayer(self):
with tf.Graph().as_default():
pre, act, (w, b) = convnet.conv_layer(
layer_id=1,
inputs=tf.zeros([5, 3, 3, 2]),
kernel_size=3,
out_channels=5)
self.assertShapeEqual(np.zeros([5, 3, 3, 5]), pre)
self.assertShapeEqual(np.zeros([5, 3, 3, 5]), act)
self.assertShapeEqual(np.zeros([3, 3, 2, 5]), tf.convert_to_tensor(w))
self.assertShapeEqual(np.zeros([5]), tf.convert_to_tensor(b))
self.assertIsInstance(w, tf.Variable)
self.assertIsInstance(b, tf.Variable)
self.assertIn("conv_1", w.op.name)
self.assertIn("conv_1", b.op.name)
def testMaxPoolLayer(self):
with tf.Graph().as_default():
act = convnet.max_pool_layer(
layer_id=1, inputs=tf.zeros([5, 6, 6, 2]), kernel_size=5, stride=3)
self.assertShapeEqual(np.zeros([5, 2, 2, 2]), act)
self.assertEqual(act.op.name, "pool_1/pool")
def testLinearLayer(self):
with tf.Graph().as_default():
act, (w, b) = convnet.linear_layer(
layer_id=1, inputs=tf.zeros([5, 20]), output_size=5)
self.assertShapeEqual(np.zeros([5, 5]), act)
self.assertShapeEqual(np.zeros([20, 5]), tf.convert_to_tensor(w))
self.assertShapeEqual(np.zeros([5]), tf.convert_to_tensor(b))
self.assertIsInstance(w, tf.Variable)
self.assertIsInstance(b, tf.Variable)
self.assertIn("fc_1", w.op.name)
self.assertIn("fc_1", b.op.name)
def testBuildModel(self):
with tf.Graph().as_default():
x = tf.placeholder(tf.float32, [None, 6, 6, 3])
y = tf.placeholder(tf.int64, [None])
layer_collection = lc.LayerCollection()
loss, accuracy = convnet.build_model(
x, y, num_labels=5, layer_collection=layer_collection)
# Ensure layers and logits were registered.
self.assertEqual(len(layer_collection.fisher_blocks), 3)
self.assertEqual(len(layer_collection.losses), 1)
# Ensure inference doesn't crash.
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
x: np.random.randn(10, 6, 6, 3).astype(np.float32),
y: np.random.randint(5, size=10).astype(np.int64),
}
sess.run([loss, accuracy], feed_dict=feed_dict)
def _build_toy_problem(self):
"""Construct a toy linear regression problem.
Initial loss should be,
2.5 = 0.5 * (1^2 + 2^2)
Returns:
loss: 0-D Tensor representing loss to be minimized.
accuracy: 0-D Tensors representing model accuracy.
layer_collection: LayerCollection instance describing model architecture.
"""
x = np.asarray([[1.], [2.]]).astype(np.float32)
y = np.asarray([1., 2.]).astype(np.float32)
x, y = (tf.data.Dataset.from_tensor_slices((x, y))
.repeat(100).batch(2).make_one_shot_iterator().get_next())
w = tf.get_variable("w", shape=[1, 1], initializer=tf.zeros_initializer())
y_hat = tf.matmul(x, w)
loss = tf.reduce_mean(0.5 * tf.square(y_hat - y))
accuracy = loss
layer_collection = lc.LayerCollection()
layer_collection.register_fully_connected(params=w, inputs=x, outputs=y_hat)
layer_collection.register_normal_predictive_distribution(y_hat)
return loss, accuracy, layer_collection
def testMinimizeLossSingleMachine(self):
with tf.Graph().as_default():
loss, accuracy, layer_collection = self._build_toy_problem()
accuracy_ = convnet.minimize_loss_single_machine(
loss, accuracy, layer_collection, device="/cpu:0")
self.assertLess(accuracy_, 2.0)
def testMinimizeLossDistributed(self):
with tf.Graph().as_default():
loss, accuracy, layer_collection = self._build_toy_problem()
accuracy_ = convnet.distributed_grads_only_and_ops_chief_worker(
task_id=0,
is_chief=True,
num_worker_tasks=1,
num_ps_tasks=0,
master="",
checkpoint_dir=None,
loss=loss,
accuracy=accuracy,
layer_collection=layer_collection)
self.assertLess(accuracy_, 2.0)
def testTrainMnistSingleMachine(self):
with tf.Graph().as_default():
# Ensure model training doesn't crash.
#
# Ideally, we should check that accuracy increases as the model converges,
# but there are too few parameters for the model to effectively memorize
# the training set the way an MLP can.
convnet.train_mnist_single_machine(
data_dir=None, num_epochs=1, use_fake_data=True, device="/cpu:0")
def testTrainMnistMultitower(self):
with tf.Graph().as_default():
# Ensure model training doesn't crash.
convnet.train_mnist_multitower(
data_dir=None, num_epochs=1, num_towers=2, use_fake_data=True)
def testTrainMnistDistributed(self):
with tf.Graph().as_default():
# Ensure model training doesn't crash.
convnet.train_mnist_distributed_sync_replicas(
task_id=0,
is_chief=True,
num_worker_tasks=1,
num_ps_tasks=0,
master="",
data_dir=None,
num_epochs=2,
op_strategy="chief_worker",
use_fake_data=True)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 5,315,397,503,990,249,000 | 36.73494 | 80 | 0.639527 | false |
Orochimarufan/youtube-dl | youtube_dl/extractor/theintercept.py | 71 | 1801 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
parse_iso8601,
int_or_none,
ExtractorError,
)
class TheInterceptIE(InfoExtractor):
_VALID_URL = r'https?://theintercept\.com/fieldofvision/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://theintercept.com/fieldofvision/thisisacoup-episode-four-surrender-or-die/',
'md5': '145f28b41d44aab2f87c0a4ac8ec95bd',
'info_dict': {
'id': '46214',
'ext': 'mp4',
'title': '#ThisIsACoup – Episode Four: Surrender or Die',
'description': 'md5:74dd27f0e2fbd50817829f97eaa33140',
'timestamp': 1450429239,
'upload_date': '20151218',
'comment_count': int,
}
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
json_data = self._parse_json(self._search_regex(
r'initialStoreTree\s*=\s*(?P<json_data>{.+})', webpage,
'initialStoreTree'), display_id)
for post in json_data['resources']['posts'].values():
if post['slug'] == display_id:
return {
'_type': 'url_transparent',
'url': 'jwplatform:%s' % post['fov_videoid'],
'id': compat_str(post['ID']),
'display_id': display_id,
'title': post['title'],
'description': post.get('excerpt'),
'timestamp': parse_iso8601(post.get('date')),
'comment_count': int_or_none(post.get('comments_number')),
}
raise ExtractorError('Unable to find the current post')
| unlicense | 1,900,185,122,016,027,400 | 35.714286 | 99 | 0.540856 | false |
telefonicaid/fiware-pep-steelskin | test/acceptance/integration/headers/steps.py | 32 | 1455 | # -*- coding: utf-8 -*-
"""
Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
This file is part of fiware-pep-steelskin
fiware-pep-steelskin is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
fiware-pep-steelskin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with fiware-pep-steelskin.
If not, see http://www.gnu.org/licenses/.
For those usages not covered by the GNU Affero General Public License
please contact with::[iot_support at tid.es]
"""
__author__ = 'Jon Calderin Goñi <[email protected]>'
from integration.steps_lib.mocks import *
from integration.steps_lib.proxys import *
from integration.steps_lib.access_control import *
from integration.steps_lib.background import *
from integration.steps_lib.general import *
from integration.steps_lib.headers import *
from integration.steps_lib.keystone import *
from integration.steps_lib.payload import *
from integration.steps_lib.request import *
from integration.steps_lib.responses import *
from integration.steps_lib.url import * | agpl-3.0 | 1,642,707,539,401,645,600 | 40.542857 | 75 | 0.793531 | false |
Anonymous-X6/django | django/template/loaders/filesystem.py | 418 | 2158 | """
Wrapper for loading templates from the filesystem.
"""
import errno
import io
import warnings
from django.core.exceptions import SuspiciousFileOperation
from django.template import Origin, TemplateDoesNotExist
from django.utils._os import safe_join
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def get_dirs(self):
return self.engine.dirs
def get_contents(self, origin):
try:
with io.open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name, template_dirs=None):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
if not template_dirs:
template_dirs = self.get_dirs()
for template_dir in template_dirs:
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
def load_template_source(self, template_name, template_dirs=None):
warnings.warn(
'The load_template_sources() method is deprecated. Use '
'get_template() or get_contents() instead.',
RemovedInDjango20Warning,
)
for origin in self.get_template_sources(template_name, template_dirs):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
| bsd-3-clause | -7,074,299,911,050,740,000 | 32.71875 | 79 | 0.620945 | false |
proversity-org/edx-platform | openedx/core/lib/edx_api_utils.py | 18 | 3689 | """Helper functions to get data from APIs"""
from __future__ import unicode_literals
import logging
from django.core.cache import cache
from openedx.core.lib.cache_utils import zpickle, zunpickle
log = logging.getLogger(__name__)
def get_fields(fields, response):
"""Extracts desired fields from the API response"""
results = {}
for field in fields:
results[field] = response.get(field)
return results
def get_edx_api_data(api_config, resource, api, resource_id=None, querystring=None, cache_key=None, many=True,
traverse_pagination=True, fields=None, long_term_cache=False):
"""GET data from an edX REST API.
DRY utility for handling caching and pagination.
Arguments:
api_config (ConfigurationModel): The configuration model governing interaction with the API.
resource (str): Name of the API resource being requested.
Keyword Arguments:
api (APIClient): API client to use for requesting data.
resource_id (int or str): Identifies a specific resource to be retrieved.
querystring (dict): Optional query string parameters.
cache_key (str): Where to cache retrieved data. The cache will be ignored if this is omitted
(neither inspected nor updated).
many (bool): Whether the resource requested is a collection of objects, or a single object.
If false, an empty dict will be returned in cases of failure rather than the default empty list.
        traverse_pagination (bool): Whether to traverse pagination or return the paginated response.
long_term_cache (bool): Whether to use the long term cache ttl or the standard cache ttl
Returns:
Data returned by the API. When hitting a list endpoint, extracts "results" (list of dict)
returned by DRF-powered APIs.
"""
no_data = [] if many else {}
if not api_config.enabled:
log.warning('%s configuration is disabled.', api_config.API_NAME)
return no_data
if cache_key:
cache_key = '{}.{}'.format(cache_key, resource_id) if resource_id is not None else cache_key
cache_key += '.zpickled'
cached = cache.get(cache_key)
if cached:
return zunpickle(cached)
try:
endpoint = getattr(api, resource)
querystring = querystring if querystring else {}
response = endpoint(resource_id).get(**querystring)
if resource_id is not None:
if fields:
results = get_fields(fields, response)
else:
results = response
elif traverse_pagination:
results = _traverse_pagination(response, endpoint, querystring, no_data)
else:
results = response
except: # pylint: disable=bare-except
log.exception('Failed to retrieve data from the %s API.', api_config.API_NAME)
return no_data
if cache_key:
zdata = zpickle(results)
cache_ttl = api_config.cache_ttl
if long_term_cache:
cache_ttl = api_config.long_term_cache_ttl
cache.set(cache_key, zdata, cache_ttl)
return results
def _traverse_pagination(response, endpoint, querystring, no_data):
"""Traverse a paginated API response.
Extracts and concatenates "results" (list of dict) returned by DRF-powered APIs.
"""
results = response.get('results', no_data)
page = 1
next_page = response.get('next')
while next_page:
page += 1
querystring['page'] = page
response = endpoint.get(**querystring)
results += response.get('results', no_data)
next_page = response.get('next')
return results
| agpl-3.0 | -8,052,240,791,415,435,000 | 34.471154 | 110 | 0.650583 | false |
kgullikson88/General | Feiden.py | 2 | 4640 | from __future__ import division, print_function
import os
import os.path
import pickle
import numpy as np
from pkg_resources import resource_filename
from scipy.interpolate import LinearNDInterpolator as interpnd
try:
import pandas as pd
except ImportError:
pd = None
from isochrones.isochrone import Isochrone
DATADIR = os.getenv('ISOCHRONES',
os.path.expanduser(os.path.join('~', '.isochrones')))
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/Feiden.h5'.format(DATADIR)
TRI_FILE = '{}/Feiden.tri'.format(DATADIR)
MAXAGES = np.load(resource_filename('isochrones', 'data/dartmouth_maxages.npz'))
MAXAGE = interpnd(MAXAGES['points'], MAXAGES['maxages'])
# def _download_h5():
# """
# Downloads HDF5 file containing Dartmouth grids from Zenodo.
# """
# #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'
# url = 'http://zenodo.org/record/15843/files/dartmouth.h5'
# from six.moves import urllib
# print('Downloading Dartmouth stellar model data (should happen only once)...')
# if os.path.exists(MASTERFILE):
# os.remove(MASTERFILE)
# urllib.request.urlretrieve(url,MASTERFILE)
#def _download_tri():
# """
# Downloads pre-computed triangulation for Dartmouth grids from Zenodo.
# """
# #url = 'http://zenodo.org/record/12800/files/dartmouth.tri'
# #url = 'http://zenodo.org/record/15843/files/dartmouth.tri'
# url = 'http://zenodo.org/record/17627/files/dartmouth.tri'
# from six.moves import urllib
# print('Downloading Dartmouth isochrone pre-computed triangulation (should happen only once...)')
# if os.path.exists(TRI_FILE):
# os.remove(TRI_FILE)
# urllib.request.urlretrieve(url,TRI_FILE)
#if not os.path.exists(MASTERFILE):
# _download_h5()
#if not os.path.exists(TRI_FILE):
# _download_tri()
#Check to see if you have the right dataframe and tri file
#import hashlib
#DF_SHASUM = '0515e83521f03cfe3ab8bafcb9c8187a90fd50c7'
#TRI_SHASUM = 'e05a06c799abae3d526ac83ceeea5e6df691a16d'
#if hashlib.sha1(open(MASTERFILE, 'rb').read()).hexdigest() != DF_SHASUM:
# raise ImportError('You have a wrong/corrupted/outdated Dartmouth DataFrame!' +
# ' Delete {} and try re-importing to download afresh.'.format(MASTERFILE))
#if hashlib.sha1(open(TRI_FILE, 'rb').read()).hexdigest() != TRI_SHASUM:
# raise ImportError('You have a wrong/corrupted/outdated Dartmouth triangulation!' +
# ' Delete {} and try re-importing to download afresh.'.format(TRI_FILE))
#
if pd is not None:
MASTERDF = pd.read_hdf(MASTERFILE, 'df').dropna() #temporary hack
else:
MASTERDF = None
class Feiden_Isochrone(Isochrone):
"""Dotter (2008) Stellar Models, at solar a/Fe and He abundances.
:param bands: (optional)
List of desired photometric bands. Must be a subset of
``['U','B','V','R','I','J','H','K','g','r','i','z','Kepler','D51',
'W1','W2','W3']``, which is the default. W4 is not included
because it does not have a well-measured A(lambda)/A(V).
"""
def __init__(self, bands=None, **kwargs):
df = MASTERDF
log_ages = np.log10(df['Age'])
minage = log_ages.min()
maxage = log_ages.max()
# make copies that claim to have different metallicities. This is a lie, but makes things work.
lowmet = df.copy()
lowmet['feh'] = -0.1
highmet = df.copy()
highmet['feh'] = 0.1
df = pd.concat((df, lowmet, highmet))
mags = {}
if bands is not None:
for band in bands:
try:
if band in ['g', 'r', 'i', 'z']:
mags[band] = df['sdss_{}'.format(band)]
else:
mags[band] = df[band]
except:
if band == 'kep' or band == 'Kepler':
mags[band] = df['Kp']
elif band == 'K':
mags['K'] = df['Ks']
else:
raise
tri = None
try:
f = open(TRI_FILE, 'rb')
tri = pickle.load(f)
except:
f = open(TRI_FILE, 'rb')
tri = pickle.load(f, encoding='latin-1')
finally:
f.close()
Isochrone.__init__(self, m_ini=df['Msun'], age=np.log10(df['Age']),
feh=df['feh'], m_act=df['Msun'], logL=df['logL'],
Teff=10 ** df['logT'], logg=df['logg'], mags=mags,
tri=tri, minage=minage, maxage=maxage, **kwargs)
| gpl-3.0 | 3,022,859,642,342,912,500 | 32.868613 | 103 | 0.586422 | false |
cmcantalupo/geopm | integration/experiment/power_sweep/gen_plot_power_limit.py | 1 | 5090 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
Shows balancer chosen power limits on each socket over time.
'''
import pandas
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import sys
import os
import argparse
from experiment import common_args
from experiment import plotting
def plot_lines(traces, label, analysis_dir):
if not os.path.exists(analysis_dir):
os.mkdir(analysis_dir)
fig, axs = plt.subplots(2)
fig.set_size_inches((20, 10))
num_traces = len(traces)
colormap = cm.jet
colors = [colormap(i) for i in np.linspace(0, 1, num_traces*2)]
idx = 0
for path in traces:
node_name = path.split('-')[-1]
df = pandas.read_csv(path, delimiter='|', comment='#')
time = df['TIME']
pl0 = df['MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT-package-0']
pl1 = df['MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT-package-1']
rt0 = df['EPOCH_RUNTIME-package-0'] - df['EPOCH_RUNTIME_NETWORK-package-0']
rt1 = df['EPOCH_RUNTIME-package-1'] - df['EPOCH_RUNTIME_NETWORK-package-1']
plot_tgt = False
try:
tgt = df['POLICY_MAX_EPOCH_RUNTIME']
plot_tgt = True
except:
sys.stdout.write('POLICY_MAX_EPOCH_RUNTIME missing from trace {}; data will be omitted from plot.\n'.format(path))
color0 = colors[idx]
color1 = colors[idx + 1]
idx += 2
axs[0].plot(time, pl0, color=color0)
axs[0].plot(time, pl1, color=color1)
axs[1].plot(time, rt0, label='pkg-0-{}'.format(node_name), color=color0)
axs[1].plot(time, rt1, label='pkg-1-{}'.format(node_name), color=color1)
axs[0].set_title('Per socket power limits')
axs[0].set_ylabel('Power (w)')
axs[1].set_title('Per socket runtimes and target')
axs[1].set_xlabel('Time (s)')
axs[1].set_ylabel('Epoch duration (s)')
if plot_tgt:
# draw target once on top of other lines
axs[1].plot(time, tgt, label='target')
fig.legend(loc='lower right')
agent = ' '.join(traces[0].split('_')[1:3]).title()
fig.suptitle('{} - {}'.format(label, agent), fontsize=20)
dirname = os.path.dirname(traces[0])
if len(traces) == 1:
plot_name = traces[0].split('.')[0] # gadget_power_governor_330_0.trace-epb001
plot_name += '_' + traces[0].split('-')[1]
else:
plot_name = '_'.join(traces[0].split('_')[0:3]) # gadget_power_governor
outfile = os.path.join(analysis_dir, plot_name + '_power_and_runtime.png')
sys.stdout.write('Writing {}...\n'.format(outfile))
fig.savefig(outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
common_args.add_output_dir(parser)
common_args.add_label(parser)
common_args.add_analysis_dir(parser)
# Positional arg for gathering all traces into a list
# Works for files listed explicitly, or with a glob pattern e.g. *trace*
parser.add_argument('tracepath', metavar='TRACE_PATH', nargs='+',
action='store',
help='path or glob pattern for trace files to analyze')
args, _ = parser.parse_known_args()
# see if paths are valid
for path in args.tracepath:
lp = os.path.join(args.output_dir, path)
if not (os.path.isfile(lp) and os.path.getsize(lp) > 0):
sys.stderr.write('<geopm> Error: No trace data found in {}\n'.format(lp))
sys.exit(1)
plot_lines(args.tracepath, args.label, args.analysis_dir)
| bsd-3-clause | -7,181,980,293,217,151,000 | 36.703704 | 126 | 0.655206 | false |
mkwm/casia | casia/cas/urls.py | 1 | 1180 | # -*- coding: utf-8 -*-
# This file is part of Casia - CAS server based on Django
# Copyright (C) 2013 Mateusz Małek
# Casia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# You should have received a copy of the GNU Affero General Public License
# along with Casia. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^validate$', 'casia.cas.views.validate', name='cas_validate'),
url(r'^serviceValidate$', 'casia.cas.views.service_validate',
name='cas_service_validate'),
url(r'^login$', 'casia.cas.views.login', name='cas_login'),
url(r'^issue/(?P<ticket_request_id>.*?)$', 'casia.cas.views.issue',
name='cas_issue'),
url(r'^logout$', 'casia.webapp.views.logout', name='cas_logout'),
url(r'^proxyValidate$', 'casia.cas.views.service_validate',
{'require_st': False}, name='cas_proxy_validate'),
url(r'^proxy$', 'casia.cas.views.proxy', name='cas_proxy'),
)
| agpl-3.0 | 4,458,565,512,986,749,400 | 42.666667 | 74 | 0.680237 | false |
LingxiaoJIA/gem5 | configs/common/Caches.py | 20 | 3170 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.objects import *
# Base implementations of L1, L2, IO and TLB-walker caches. They are
# used in the regressions and also as base components in the
# system-configuration scripts. The values are meant to serve as a
# starting point, and specific parameters can be overridden in the
# specific instantiations.
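# A minimal illustration of overriding these defaults in a system-configuration
# script (hypothetical values; comment added for clarity):
#
#   class MyL1DCache(L1Cache):
#       size = '64kB'
#       assoc = 4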
class L1Cache(BaseCache):
assoc = 2
hit_latency = 2
response_latency = 2
mshrs = 4
tgts_per_mshr = 20
is_top_level = True
class L2Cache(BaseCache):
assoc = 8
hit_latency = 20
response_latency = 20
mshrs = 20
tgts_per_mshr = 12
write_buffers = 8
class IOCache(BaseCache):
assoc = 8
hit_latency = 50
response_latency = 50
mshrs = 20
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
is_top_level = True
class PageTableWalkerCache(BaseCache):
assoc = 2
hit_latency = 2
response_latency = 2
mshrs = 10
size = '1kB'
tgts_per_mshr = 12
is_top_level = True
| bsd-3-clause | 4,453,312,303,817,368,000 | 37.658537 | 72 | 0.75205 | false |
jagguli/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/prepared.py | 623 | 1032 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import GEOM_PTR, PREPGEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Prepared geometry constructor and destructors.
geos_prepare = GEOSFunc('GEOSPrepare')
geos_prepare.argtypes = [GEOM_PTR]
geos_prepare.restype = PREPGEOM_PTR
prepared_destroy = GEOSFunc('GEOSPreparedGeom_destroy')
prepared_destroy.argtypes = [PREPGEOM_PTR]
prepared_destroy.restype = None
# Prepared geometry binary predicate support.
def prepared_predicate(func):
func.argtypes= [PREPGEOM_PTR, GEOM_PTR]
func.restype = c_char
func.errcheck = check_predicate
return func
prepared_contains = prepared_predicate(GEOSFunc('GEOSPreparedContains'))
prepared_contains_properly = prepared_predicate(GEOSFunc('GEOSPreparedContainsProperly'))
prepared_covers = prepared_predicate(GEOSFunc('GEOSPreparedCovers'))
prepared_intersects = prepared_predicate(GEOSFunc('GEOSPreparedIntersects'))
| apache-2.0 | -2,802,300,033,208,854,500 | 40.28 | 89 | 0.805233 | false |
enStratus/unix-agent | extensions/docker/dcmdocker/start_container.py | 3 | 2556 | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import dcm.agent.plugins.api.base as plugin_base
import dcmdocker.utils as docker_utils
_g_logger = logging.getLogger(__name__)
class StartContainer(docker_utils.DockerJob):
protocol_arguments = {
"container": ("", True, str, None),
"port_bindings": ("", False, dict, None),
"lxc_conf": ("", False, list, None),
"links": ("", False, dict, None),
"privileged": ("", False, bool, False),
"publish_all_ports": ("", False, bool, False),
"cap_add": ("", False, list, None),
"cap_drop": ("", False, list, None)
}
def __init__(self, conf, job_id, items_map, name, arguments):
super(StartContainer, self).__init__(
conf, job_id, items_map, name, arguments)
def run(self):
# make a list a tuple
if self.args.port_bindings:
for internal_port in self.args.port_bindings:
binding_list = self.args.port_bindings[internal_port]
new_binding_list = []
for bind in binding_list:
host, port = bind
new_binding_list.append((host, port,))
self.args.port_bindings[internal_port] = new_binding_list
self.docker_conn.start(self.args.container,
port_bindings=self.args.port_bindings,
lxc_conf=self.args.lxc_conf,
links=self.args.links,
privileged=self.args.privileged,
publish_all_ports=self.args.publish_all_ports,
cap_add=self.args.cap_add,
cap_drop=self.args.cap_drop,
network_mode="bridge")
return plugin_base.PluginReply(0, reply_type="void")
def load_plugin(conf, job_id, items_map, name, arguments):
return StartContainer(conf, job_id, items_map, name, arguments)
| apache-2.0 | 138,115,258,982,316,510 | 37.727273 | 77 | 0.584116 | false |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/v8/third_party/jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| mit | -5,828,995,049,539,576,000 | 49.8125 | 79 | 0.812423 | false |
callidus/playbot | playbot/bot.py | 1 | 3336 |
from __future__ import absolute_import
from __future__ import print_function
import irc.bot
import logging
import re
import ssl
import time
logger = logging.getLogger(__name__)
class PlayBot(irc.bot.SingleServerIRCBot):
def __init__(self, channels, nickname, password, server, port=6667,
force_ssl=False, server_password=None):
if force_ssl or port == 6697:
factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
super(PlayBot, self).__init__(
[(server, port, server_password)],
nickname, nickname,
connect_factory=factory)
else:
super(PlayBot, self).__init__(
[(server, port, server_password)],
nickname, nickname)
self.commands = {}
self.listeners = []
self.channel_list = channels
self.nickname = nickname
self.password = password
def register_command(self, name, obj):
self.commands[name] = obj
def register_listner(self, obj):
self.listeners.append(obj)
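    # Illustrative plugin hookup (hypothetical handler; comment added for
    # clarity). Commands are invoked as callable(bot, event, command_name,
    # *args) -- see do_command() below:
    #   def hello(bot, event, cmd, *args):
    #       bot.do_send(event.target, "hello " + " ".join(args))
    #   bot.register_command('hello', hello)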
def on_nicknameinuse(self, c, e):
logger.info('Nick previously in use, recovering.')
self.nickname = c.get_nickname() + "_"
c.nick(self.nickname)
time.sleep(1)
logger.info('Nick previously in use, recovered.')
def on_welcome(self, c, e):
for channel in self.channel_list:
c.join(channel)
logger.info('Joined channel %s' % channel)
time.sleep(0.5)
def on_privmsg(self, c, e):
e.target = re.sub("!.*", "", e.source)
self.do_command(e)
def on_pubmsg(self, c, e):
if(e.arguments[0].lower().startswith(self.nickname.lower())):
# Remove Name
e.arguments[0] = re.sub("^[\t:]*", "",
e.arguments[0][len(self.nickname):])
self.do_command(e)
else:
try:
for listener in self.listeners:
msg = listener(self, c, e)
if msg is not None:
self.do_send(e.target, msg)
except Exception as err:
logger.warn('Error in listener: %s', err)
def on_dccmsg(self, c, e):
c.privmsg("You said: " + e.arguments[0])
def do_command(self, e):
msg = e.arguments[0].strip().split(" ")
cmd = msg[0].lower()
arg = msg[1:]
if cmd == 'help':
cmdStr = "commands: help " + " ".join(self.commands.keys())
self.do_send(e.target, cmdStr)
elif cmd in self.commands:
c = self.commands[cmd]
try:
c(self, e, cmd, *arg)
except Exception as err:
logger.warn('Error in command: %s %s', str(cmd), err)
self.do_send(e.target, "Huh?")
else:
            nick = re.sub("!.*", "", e.source)  # Strip the user@host part, leaving just the nick
c = self.connection
c.notice(nick, "Not understood: " + cmd)
def do_send(self, channel, msg):
logger.info('Sending "%s" to %s' % (msg, channel))
try:
self.connection.privmsg(channel, msg)
time.sleep(0.5)
except Exception:
logger.exception('Exception sending message:')
self.reconnect()
| mit | 3,843,377,846,509,192,700 | 30.471698 | 72 | 0.525779 | false |
beni55/olympia | apps/api/views.py | 13 | 18404 | """
API views
"""
import hashlib
import itertools
import json
import random
import urllib
from datetime import date, timedelta
from django.core.cache import cache
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import render
from django.template.context import get_standard_processors
from django.utils import encoding, translation
from django.utils.encoding import smart_str
from django.views.decorators.csrf import csrf_exempt
import commonware.log
import jingo
import waffle
from caching.base import cached_with
from piston.utils import rc
from tower import ugettext as _, ugettext_lazy
import amo
import api
from addons.models import Addon, CompatOverride
from amo.decorators import post_required, allow_cross_site_request, json_view
from amo.models import manual_order
from amo.urlresolvers import get_url_prefix
from amo.utils import JSONEncoder
from api.authentication import AMOOAuthAuthentication
from api.forms import PerformanceForm
from api.utils import addon_to_dict, extract_filters
from perf.models import (Performance, PerformanceAppVersions,
PerformanceOSVersion)
from search.views import (AddonSuggestionsAjax, PersonaSuggestionsAjax,
name_query)
from versions.compare import version_int
ERROR = 'error'
OUT_OF_DATE = ugettext_lazy(
u"The API version, {0:.1f}, you are using is not valid. "
u"Please upgrade to the current version {1:.1f} API.")
SEARCHABLE_STATUSES = (amo.STATUS_PUBLIC, amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED)
xml_env = jingo.env.overlay()
old_finalize = xml_env.finalize
xml_env.finalize = lambda x: amo.helpers.strip_controls(old_finalize(x))
# Hard limit of 30. The buffer is to try for locale-specific add-ons.
MAX_LIMIT, BUFFER = 30, 10
# "New" is arbitrarily defined as 10 days old.
NEW_DAYS = 10
log = commonware.log.getLogger('z.api')
def partition(seq, key):
"""Group a sequence based into buckets by key(x)."""
groups = itertools.groupby(sorted(seq, key=key), key=key)
return ((k, list(v)) for k, v in groups)
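# e.g. partition([1, 2, 3, 4], key=lambda x: x % 2) yields (0, [2, 4]) and
# (1, [1, 3]) -- illustrative comment, not part of the original module.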
def render_xml_to_string(request, template, context={}):
if not jingo._helpers_loaded:
jingo.load_helpers()
for processor in get_standard_processors():
context.update(processor(request))
template = xml_env.get_template(template)
return template.render(context)
def render_xml(request, template, context={}, **kwargs):
"""Safely renders xml, stripping out nasty control characters."""
rendered = render_xml_to_string(request, template, context)
if 'content_type' not in kwargs:
kwargs['content_type'] = 'text/xml'
return HttpResponse(rendered, **kwargs)
def handler403(request):
context = {'error_level': ERROR, 'msg': 'Not allowed'}
return render_xml(request, 'api/message.xml', context, status=403)
def handler404(request):
context = {'error_level': ERROR, 'msg': 'Not Found'}
return render_xml(request, 'api/message.xml', context, status=404)
def handler500(request):
context = {'error_level': ERROR, 'msg': 'Server Error'}
return render_xml(request, 'api/message.xml', context, status=500)
def validate_api_version(version):
"""
We want to be able to deprecate old versions of the API, therefore we check
for a minimum API version before continuing.
"""
if float(version) < api.MIN_VERSION:
return False
if float(version) > api.MAX_VERSION:
return False
return True
def addon_filter(addons, addon_type, limit, app, platform, version,
compat_mode='strict', shuffle=True):
"""
Filter addons by type, application, app version, and platform.
    Add-ons that support the current locale will be sorted to the front of the
    list. Shuffling will be applied to the add-ons supporting the locale and
    the others separately.
    Doing this in the database takes too long, so we do it in code and wrap it
    in generous caching.
"""
APP = app
if addon_type.upper() != 'ALL':
try:
addon_type = int(addon_type)
if addon_type:
addons = [a for a in addons if a.type == addon_type]
except ValueError:
# `addon_type` is ALL or a type id. Otherwise we ignore it.
pass
# Take out personas since they don't have versions.
groups = dict(partition(addons,
lambda x: x.type == amo.ADDON_PERSONA))
personas, addons = groups.get(True, []), groups.get(False, [])
platform = platform.lower()
if platform != 'all' and platform in amo.PLATFORM_DICT:
def f(ps):
return pid in ps or amo.PLATFORM_ALL in ps
pid = amo.PLATFORM_DICT[platform]
addons = [a for a in addons
if f(a.current_version.supported_platforms)]
if version is not None:
vint = version_int(version)
def f_strict(app):
return app.min.version_int <= vint <= app.max.version_int
def f_ignore(app):
return app.min.version_int <= vint
xs = [(a, a.compatible_apps) for a in addons]
# Iterate over addons, checking compatibility depending on compat_mode.
addons = []
for addon, apps in xs:
app = apps.get(APP)
if compat_mode == 'strict':
if app and f_strict(app):
addons.append(addon)
elif compat_mode == 'ignore':
if app and f_ignore(app):
addons.append(addon)
elif compat_mode == 'normal':
# This does a db hit but it's cached. This handles the cases
# for strict opt-in, binary components, and compat overrides.
v = addon.compatible_version(APP.id, version, platform,
compat_mode)
if v: # There's a compatible version.
addons.append(addon)
# Put personas back in.
addons.extend(personas)
# We prefer add-ons that support the current locale.
lang = translation.get_language()
def partitioner(x):
return x.description is not None and (x.description.locale == lang)
groups = dict(partition(addons, partitioner))
good, others = groups.get(True, []), groups.get(False, [])
if shuffle:
random.shuffle(good)
random.shuffle(others)
# If limit=0, we return all addons with `good` coming before `others`.
# Otherwise pad `good` if less than the limit and return the limit.
if limit > 0:
if len(good) < limit:
good.extend(others[:limit - len(good)])
return good[:limit]
else:
good.extend(others)
return good
class APIView(object):
"""
Base view class for all API views.
"""
def __call__(self, request, api_version, *args, **kwargs):
self.version = float(api_version)
self.format = request.REQUEST.get('format', 'xml')
self.content_type = ('text/xml' if self.format == 'xml'
else 'application/json')
self.request = request
if not validate_api_version(api_version):
msg = OUT_OF_DATE.format(self.version, api.CURRENT_VERSION)
return self.render_msg(msg, ERROR, status=403,
content_type=self.content_type)
return self.process_request(*args, **kwargs)
def render_msg(self, msg, error_level=None, *args, **kwargs):
"""
Renders a simple message.
"""
if self.format == 'xml':
return render_xml(
self.request, 'api/message.xml',
{'error_level': error_level, 'msg': msg}, *args, **kwargs)
else:
return HttpResponse(json.dumps({'msg': _(msg)}), *args, **kwargs)
def render(self, template, context):
context['api_version'] = self.version
context['api'] = api
if self.format == 'xml':
return render_xml(self.request, template, context,
content_type=self.content_type)
else:
return HttpResponse(self.render_json(context),
content_type=self.content_type)
def render_json(self, context):
return json.dumps({'msg': _('Not implemented yet.')})
class AddonDetailView(APIView):
@allow_cross_site_request
def process_request(self, addon_id):
try:
addon = Addon.objects.id_or_slug(addon_id).get()
except Addon.DoesNotExist:
return self.render_msg(
'Add-on not found!', ERROR, status=404,
content_type=self.content_type
)
if addon.is_disabled:
return self.render_msg('Add-on disabled.', ERROR, status=404,
content_type=self.content_type)
return self.render_addon(addon)
def render_addon(self, addon):
return self.render('api/addon_detail.xml', {'addon': addon})
def render_json(self, context):
return json.dumps(addon_to_dict(context['addon']), cls=JSONEncoder)
def guid_search(request, api_version, guids):
lang = request.LANG
def guid_search_cache_key(guid):
key = 'guid_search:%s:%s:%s' % (api_version, lang, guid)
return hashlib.md5(smart_str(key)).hexdigest()
guids = [g.strip() for g in guids.split(',')] if guids else []
addons_xml = cache.get_many([guid_search_cache_key(g) for g in guids])
dirty_keys = set()
for g in guids:
key = guid_search_cache_key(g)
if key not in addons_xml:
dirty_keys.add(key)
try:
addon = Addon.objects.get(guid=g, disabled_by_user=False,
status__in=SEARCHABLE_STATUSES)
except Addon.DoesNotExist:
addons_xml[key] = ''
else:
addon_xml = render_xml_to_string(request,
'api/includes/addon.xml',
{'addon': addon,
'api_version': api_version,
'api': api})
addons_xml[key] = addon_xml
cache.set_many(dict((k, v) for k, v in addons_xml.iteritems()
if k in dirty_keys))
compat = (CompatOverride.objects.filter(guid__in=guids)
.transform(CompatOverride.transformer))
addons_xml = [v for v in addons_xml.values() if v]
return render_xml(request, 'api/search.xml',
{'addons_xml': addons_xml,
'total': len(addons_xml),
'compat': compat,
'api_version': api_version, 'api': api})
class SearchView(APIView):
def process_request(self, query, addon_type='ALL', limit=10,
platform='ALL', version=None, compat_mode='strict'):
"""
Query the search backend and serve up the XML.
"""
limit = min(MAX_LIMIT, int(limit))
app_id = self.request.APP.id
# We currently filter for status=PUBLIC for all versions. If
# that changes, the contract for API version 1.5 requires
# that we continue filtering for it there.
filters = {
'app': app_id,
'status': amo.STATUS_PUBLIC,
'is_disabled': False,
'has_version': True,
}
# Opts may get overridden by query string filters.
opts = {
'addon_type': addon_type,
'version': version,
}
# Specific case for Personas (bug 990768): if we search providing the
# Persona addon type (9), don't filter on the platform as Personas
# don't have compatible platforms to filter on.
if addon_type != '9':
opts['platform'] = platform
if self.version < 1.5:
# Fix doubly encoded query strings.
try:
query = urllib.unquote(query.encode('ascii'))
except UnicodeEncodeError:
# This fails if the string is already UTF-8.
pass
query, qs_filters, params = extract_filters(query, opts)
qs = Addon.search().query(or_=name_query(query))
filters.update(qs_filters)
if 'type' not in filters:
# Filter by ALL types, which is really all types except for apps.
filters['type__in'] = list(amo.ADDON_SEARCH_TYPES)
qs = qs.filter(**filters)
qs = qs[:limit]
total = qs.count()
results = []
for addon in qs:
compat_version = addon.compatible_version(app_id,
params['version'],
params['platform'],
compat_mode)
# Specific case for Personas (bug 990768): if we search providing
# the Persona addon type (9), then don't look for a compatible
# version.
if compat_version or addon_type == '9':
addon.compat_version = compat_version
results.append(addon)
if len(results) == limit:
break
else:
# We're excluding this addon because there are no
# compatible versions. Decrement the total.
total -= 1
return self.render('api/search.xml', {
'results': results,
'total': total,
# For caching
'version': version,
'compat_mode': compat_mode,
})
@json_view
def search_suggestions(request):
if waffle.sample_is_active('autosuggest-throttle'):
return HttpResponse(status=503)
cat = request.GET.get('cat', 'all')
suggesterClass = {
'all': AddonSuggestionsAjax,
'themes': PersonaSuggestionsAjax,
}.get(cat, AddonSuggestionsAjax)
items = suggesterClass(request, ratings=True).items
for s in items:
s['rating'] = float(s['rating'])
return {'suggestions': items}
class ListView(APIView):
def process_request(self, list_type='recommended', addon_type='ALL',
limit=10, platform='ALL', version=None,
compat_mode='strict'):
"""
Find a list of new or featured add-ons. Filtering is done in Python
for cache-friendliness and to avoid heavy queries.
"""
limit = min(MAX_LIMIT, int(limit))
APP, platform = self.request.APP, platform.lower()
qs = Addon.objects.listed(APP)
shuffle = True
if list_type in ('by_adu', 'featured'):
qs = qs.exclude(type=amo.ADDON_PERSONA)
if list_type == 'newest':
new = date.today() - timedelta(days=NEW_DAYS)
addons = (qs.filter(created__gte=new)
.order_by('-created'))[:limit + BUFFER]
elif list_type == 'by_adu':
addons = qs.order_by('-average_daily_users')[:limit + BUFFER]
shuffle = False # By_adu is an ordered list.
elif list_type == 'hotness':
# Filter to type=1 so we hit visible_idx. Only extensions have a
# hotness index right now so this is not incorrect.
addons = (qs.filter(type=amo.ADDON_EXTENSION)
.order_by('-hotness'))[:limit + BUFFER]
shuffle = False
else:
ids = Addon.featured_random(APP, self.request.LANG)
addons = manual_order(qs, ids[:limit + BUFFER], 'addons.id')
shuffle = False
args = (addon_type, limit, APP, platform, version, compat_mode,
shuffle)
def f():
return self._process(addons, *args)
return cached_with(addons, f, map(encoding.smart_str, args))
def _process(self, addons, *args):
return self.render('api/list.xml',
{'addons': addon_filter(addons, *args)})
def render_json(self, context):
return json.dumps([addon_to_dict(a) for a in context['addons']],
cls=JSONEncoder)
class LanguageView(APIView):
def process_request(self):
addons = Addon.objects.filter(status=amo.STATUS_PUBLIC,
type=amo.ADDON_LPAPP,
appsupport__app=self.request.APP.id,
disabled_by_user=False).order_by('pk')
return self.render('api/list.xml', {'addons': addons,
'show_localepicker': True})
# pylint: disable-msg=W0613
def redirect_view(request, url):
"""
Redirect all requests that come here to an API call with a view parameter.
"""
dest = '/api/%.1f/%s' % (api.CURRENT_VERSION,
urllib.quote(url.encode('utf-8')))
dest = get_url_prefix().fix(dest)
return HttpResponsePermanentRedirect(dest)
def request_token_ready(request, token):
error = request.GET.get('error', '')
ctx = {'error': error, 'token': token}
return render(request, 'piston/request_token_ready.html', ctx)
@csrf_exempt
@post_required
def performance_add(request):
"""
A wrapper around adding in performance data that is easier than
using the piston API.
"""
# Trigger OAuth.
if not AMOOAuthAuthentication(two_legged=True).is_authenticated(request):
return rc.FORBIDDEN
form = PerformanceForm(request.POST)
if not form.is_valid():
return form.show_error()
os, created = (PerformanceOSVersion
.objects.safer_get_or_create(**form.os_version))
app, created = (PerformanceAppVersions
.objects.safer_get_or_create(**form.app_version))
data = form.performance
data.update({'osversion': os, 'appversion': app})
# Look up on everything except the average time.
result, created = Performance.objects.safer_get_or_create(**data)
result.average = form.cleaned_data['average']
result.save()
log.info('Performance created for add-on: %s, %s' %
(form.cleaned_data['addon_id'], form.cleaned_data['average']))
return rc.ALL_OK
| bsd-3-clause | -2,194,925,147,266,002,400 | 33.529081 | 79 | 0.583243 | false |
fgesora/odoo | addons/l10n_co/__openerp__.py | 256 | 1794 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), [email protected]
# Co-Authors Juan Pablo Aries (devCO), [email protected]
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Colombian - Accounting',
'version': '0.8',
'category': 'Localization/Account Charts',
'description': 'Colombian Accounting and Tax Preconfiguration',
'author': 'David Arnold BA HSG (devCO)',
'depends': [
'account',
'base_vat',
'account_chart',
],
'data': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'wizard/account_wizard.xml',
],
'demo': [],
'installable': True,
}
| agpl-3.0 | -1,411,559,862,458,768,100 | 34.86 | 78 | 0.597323 | false |
xuyuan/openni.pyx | demo.py | 1 | 1589 | #!/usr/bin/env python
from openni import xn
import cv
cvimage = cv.CreateImageHeader( (640, 480), cv.IPL_DEPTH_8U, 3 )
cvdepth = cv.CreateImageHeader( (640, 480), cv.IPL_DEPTH_16U, 1 )
cvlabel = cv.CreateImageHeader( (640, 480), cv.IPL_DEPTH_16U, 1 )
# v1 = xn.Version(0, 1, 1, 1)
# v2 = xn.Version(0, 1, 1, 1)
# print v1 == v2
context = xn.Context()
node = context.InitFromXmlFile('demo.xml')
# node = context.OpenFileRecording('test.oni')
assert node
depthGenerator = context.FindExistingNode(xn.Node.TYPE_DEPTH)
imageGenerator = context.FindExistingNode(xn.Node.TYPE_IMAGE)
# recorder = context.FindExistingNode(xn.NODE_TYPE_RECORDER)
sceneAnalyzer = context.FindExistingNode(xn.Node.TYPE_SCENE)
try:
while context.WaitAndUpdateAll():
if depthGenerator:
depth = depthGenerator.GetDepthMap()
cv.SetData(cvdepth, depth.tostring())
cv.ShowImage("Depth Stream", cvdepth)
if imageGenerator:
image = imageGenerator.GetRGB24ImageMap()
cv.SetData(cvimage, image.tostring())
cv.CvtColor(cvimage, cvimage, cv.CV_RGB2BGR)
cv.ShowImage("Image Stream", cvimage)
if sceneAnalyzer:
label = sceneAnalyzer.GetLabelMap()
label[label.nonzero()] = 2 ** 15
cv.SetData(cvlabel, label.tostring())
cv.ShowImage("Label", cvlabel)
key = cv.WaitKey(10)
if key == 27:
break
finally:
    # release all resources
del context
del node
del depthGenerator
del imageGenerator
del sceneAnalyzer
| mit | -3,085,732,931,416,471,000 | 26.396552 | 65 | 0.648206 | false |
rcbops/python-quantumclient-buildpackage | quantum/client/cli_lib.py | 2 | 12654 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# Copyright 2011 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Salvatore Orlando, Citrix
""" Functions providing implementation for CLI commands. """
import logging
import os
import sys
FORMAT = "json"
LOG = logging.getLogger('quantum.client.cli_lib')
class OutputTemplate(object):
""" A class for generating simple templated output.
Based on Python templating mechanism.
Templates can also express attributes on objects, such as network.id;
templates can also be nested, thus allowing for iteration on inner
templates.
Examples:
1) template with class attributes
Name: %(person.name)s \n
Surname: %(person.surname)s \n
2) template with iteration
Telephone numbers: \n
%(phone_numbers|Telephone number:%(number)s)
3) template with iteration and class attributes
Addresses: \n
%(Addresses|Street:%(address.street)s\nNumber%(address.number))
Instances of this class are initialized with a template string and
the dictionary for performing substition. The class implements the
__str__ method, so it can be directly printed.
"""
def __init__(self, template, data):
self._template = template
self.data = data
def __str__(self):
return self._template % self
def __getitem__(self, key):
items = key.split("|")
if len(items) == 1:
return self._make_attribute(key)
else:
# Note(salvatore-orlando): items[0] must be subscriptable
return self._make_list(self.data[items[0]], items[1])
def _make_attribute(self, item):
""" Renders an entity attribute key in the template.
e.g.: entity.attribute
"""
items = item.split('.')
if len(items) == 1:
return self.data[item]
elif len(items) == 2:
return self.data[items[0]][items[1]]
def _make_list(self, items, inner_template):
""" Renders a list key in the template.
e.g.: %(list|item data:%(item))
"""
#make sure list is subscriptable
if not hasattr(items, '__getitem__'):
raise Exception("Element is not iterable")
return "\n".join([inner_template % item for item in items])
class CmdOutputTemplate(OutputTemplate):
""" This class provides templated output for CLI commands.
Extends OutputTemplate loading a different template for each command.
"""
_templates = {
"list_nets": "Virtual Networks for Tenant %(tenant_id)s\n" +
"%(networks|\tNetwork ID: %(id)s)s",
"show_net": "Network ID: %(network.id)s\n" +
"network Name: %(network.name)s",
"create_net": "Created a new Virtual Network with ID: " +
"%(network_id)s\n" +
"for Tenant: %(tenant_id)s",
"update_net": "Updated Virtual Network with ID: %(network.id)s\n" +
"for Tenant: %(tenant_id)s\n",
"delete_net": "Deleted Virtual Network with ID: %(network_id)s\n" +
"for Tenant %(tenant_id)s",
"list_ports": "Ports on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s\n" +
"%(ports|\tLogical Port: %(id)s)s",
"create_port": "Created new Logical Port with ID: %(port_id)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s",
"show_port": "Logical Port ID: %(port.id)s\n" +
"administrative State: %(port.state)s\n" +
"interface: %(port.attachment)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s",
"update_port": "Updated Logical Port " +
"with ID: %(port.id)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for tenant: %(tenant_id)s",
"delete_port": "Deleted Logical Port with ID: %(port_id)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s",
"plug_iface": "Plugged interface %(attachment)s\n" +
"into Logical Port: %(port_id)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s",
"unplug_iface": "Unplugged interface from Logical Port:" +
"%(port_id)s\n" +
"on Virtual Network: %(network_id)s\n" +
"for Tenant: %(tenant_id)s"}
def __init__(self, cmd, data):
super(CmdOutputTemplate, self).__init__(self._templates[cmd], data)
def _handle_exception(ex):
LOG.exception(sys.exc_info())
print "Exception:%s - %s" % (sys.exc_info()[0], sys.exc_info()[1])
status_code = None
message = None
# Retrieve dict at 1st element of tuple at last argument
if ex.args and isinstance(ex.args[-1][0], dict):
status_code = ex.args[-1][0].get('status_code', None)
message = ex.args[-1][0].get('message', None)
msg_1 = "Command failed with error code: %s" \
% (status_code or '<missing>')
msg_2 = "Error message:%s" % (message or '<missing>')
LOG.exception(msg_1 + "-" + msg_2)
print msg_1
print msg_2
def prepare_output(cmd, tenant_id, response):
LOG.debug("Preparing output for response:%s", response)
response['tenant_id'] = tenant_id
output = str(CmdOutputTemplate(cmd, response))
LOG.debug("Finished preparing output for command:%s", cmd)
return output
def list_nets(client, *args):
tenant_id = args[0]
res = client.list_networks()
LOG.debug("Operation 'list_networks' executed.")
output = prepare_output("list_nets", tenant_id, res)
print output
def create_net(client, *args):
tenant_id, name = args
data = {'network': {'name': name}}
new_net_id = None
try:
res = client.create_network(data)
new_net_id = res["network"]["id"]
LOG.debug("Operation 'create_network' executed.")
output = prepare_output("create_net", tenant_id,
dict(network_id=new_net_id))
print output
except Exception as ex:
_handle_exception(ex)
def delete_net(client, *args):
tenant_id, network_id = args
try:
client.delete_network(network_id)
LOG.debug("Operation 'delete_network' executed.")
output = prepare_output("delete_net", tenant_id,
dict(network_id=network_id))
print output
except Exception as ex:
_handle_exception(ex)
def show_net(client, *args):
tenant_id, network_id = args
try:
#NOTE(salvatore-orlando) changed for returning exclusively
# output for GET /networks/{net-id} API operation
res = client.show_network_details(network_id)["network"]
LOG.debug("Operation 'show_network_details' executed.")
output = prepare_output("show_net", tenant_id,
dict(network=res))
print output
except Exception as ex:
_handle_exception(ex)
def update_net(client, *args):
tenant_id, network_id, param_data = args
data = {'network': {}}
for kv in param_data.split(","):
k, v = kv.split("=")
data['network'][k] = v
data['network']['id'] = network_id
try:
client.update_network(network_id, data)
LOG.debug("Operation 'update_network' executed.")
# Response has no body. Use data for populating output
output = prepare_output("update_net", tenant_id, data)
print output
except Exception as ex:
_handle_exception(ex)
def list_ports(client, *args):
tenant_id, network_id = args
try:
ports = client.list_ports(network_id)
LOG.debug("Operation 'list_ports' executed.")
data = ports
data['network_id'] = network_id
output = prepare_output("list_ports", tenant_id, data)
print output
except Exception as ex:
_handle_exception(ex)
def create_port(client, *args):
tenant_id, network_id = args
try:
res = client.create_port(network_id)
LOG.debug("Operation 'create_port' executed.")
new_port_id = res["port"]["id"]
output = prepare_output("create_port", tenant_id,
dict(network_id=network_id,
port_id=new_port_id))
print output
except Exception as ex:
_handle_exception(ex)
def delete_port(client, *args):
tenant_id, network_id, port_id = args
try:
client.delete_port(network_id, port_id)
LOG.debug("Operation 'delete_port' executed.")
output = prepare_output("delete_port", tenant_id,
dict(network_id=network_id,
port_id=port_id))
print output
except Exception as ex:
_handle_exception(ex)
return
def show_port(client, *args):
tenant_id, network_id, port_id = args
try:
port = client.show_port_details(network_id, port_id)["port"]
LOG.debug("Operation 'list_port_details' executed.")
        #NOTE(salvatore-orlando): current API implementation does not
        #return attachment with GET operation on port. Once API alignment
        #branch is merged, update client to use the detail action.
        # (danwent) Until then, just make an additional webservice call.
attach = client.show_port_attachment(network_id, port_id)['attachment']
if "id" in attach:
port['attachment'] = attach['id']
else:
port['attachment'] = '<none>'
output = prepare_output("show_port", tenant_id,
dict(network_id=network_id,
port=port))
print output
except Exception as ex:
_handle_exception(ex)
def update_port(client, *args):
tenant_id, network_id, port_id, param_data = args
data = {'port': {}}
for kv in param_data.split(","):
k, v = kv.split("=")
data['port'][k] = v
data['network_id'] = network_id
data['port']['id'] = port_id
try:
client.update_port(network_id, port_id, data)
LOG.debug("Operation 'udpate_port' executed.")
# Response has no body. Use data for populating output
output = prepare_output("update_port", tenant_id, data)
print output
except Exception as ex:
_handle_exception(ex)
def plug_iface(client, *args):
tenant_id, network_id, port_id, attachment = args
try:
data = {'attachment': {'id': '%s' % attachment}}
client.attach_resource(network_id, port_id, data)
LOG.debug("Operation 'attach_resource' executed.")
output = prepare_output("plug_iface", tenant_id,
dict(network_id=network_id,
port_id=port_id,
attachment=attachment))
print output
except Exception as ex:
_handle_exception(ex)
def unplug_iface(client, *args):
tenant_id, network_id, port_id = args
try:
client.detach_resource(network_id, port_id)
LOG.debug("Operation 'detach_resource' executed.")
output = prepare_output("unplug_iface", tenant_id,
dict(network_id=network_id,
port_id=port_id))
print output
except Exception as ex:
_handle_exception(ex)
| apache-2.0 | -8,090,150,393,951,178,000 | 36.773134 | 79 | 0.566303 | false |
zhjunlang/kbengine | kbe/src/lib/python/Doc/includes/mp_workers.py | 52 | 1586 | import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Unordered results:')
for i in range(len(TASKS1)):
print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
| lgpl-3.0 | 9,187,869,999,894,636,000 | 19.597403 | 75 | 0.605296 | false |
ingadhoc/odoo | addons/base_gengo/wizard/__init__.py | 434 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_gengo_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -9,053,746,830,555,276,000 | 43.875 | 78 | 0.618384 | false |
MarkTheF4rth/youtube-dl | youtube_dl/extractor/freesound.py | 192 | 1392 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class FreesoundIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
'md5': '12280ceb42c81f19a515c745eae07650',
'info_dict': {
'id': '194503',
'ext': 'mp3',
'title': 'gulls in the city.wav',
'uploader': 'miklovan',
'description': 'the sounds of seagulls in the city',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
music_id = mobj.group('id')
webpage = self._download_webpage(url, music_id)
title = self._html_search_regex(
r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
webpage, 'music title', flags=re.DOTALL)
description = self._html_search_regex(
r'<div id="sound_description">(.*?)</div>', webpage, 'description',
fatal=False, flags=re.DOTALL)
return {
'id': music_id,
'title': title,
'url': self._og_search_property('audio', webpage, 'music url'),
'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
'description': description,
}
| unlicense | -1,282,292,062,333,956,900 | 34.692308 | 92 | 0.543822 | false |
girving/tensorflow | tensorflow/python/ops/inplace_ops.py | 36 | 6296 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inplace operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
def _inplace_helper(x, i, v, op):
"""Applies an inplace op on (x, i, v).
  op is one of gen_array_ops.inplace_update,
  gen_array_ops.inplace_add, or gen_array_ops.inplace_sub.
If i is None, x and v must be the same shape. Computes
x op v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] op v;
Otherwise, x and v must have the same rank. Computes
x[i, :] op v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
    op: gen_array_ops.inplace_update, inplace_add, or inplace_sub.
Returns:
Returns x.
"""
x = ops.convert_to_tensor(x)
v = ops.convert_to_tensor(v, x.dtype)
if i is None:
# Full tensor.
return array_ops.reshape(
op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])),
array_ops.shape(x))
i = math_ops.to_int32(i)
if i.get_shape().ndims == 0:
# Single 0-dim update.
return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))
return op(x, i, v)
def alias_inplace_update(x, i, v):
"""Applies an inplace update on input x at index i with value v. Aliases x.
If i is None, x and v must be the same shape. Computes
x = v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] = v;
Otherwise, x and v must have the same rank. Computes
x[i, :] = v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns x.
"""
return _inplace_helper(x, i, v, gen_array_ops.inplace_update)
def alias_inplace_add(x, i, v):
"""Applies an inplace add on input x at index i with value v. Aliases x.
If i is None, x and v must be the same shape. Computes
x += v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] += v;
Otherwise, x and v must have the same rank. Computes
x[i, :] += v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns x.
"""
return _inplace_helper(x, i, v, gen_array_ops.inplace_add)
def alias_inplace_sub(x, i, v):
"""Applies an inplace sub on input x at index i with value v. Aliases x.
If i is None, x and v must be the same shape. Computes
x -= v;
If i is a scalar, x has a rank 1 higher than v's. Computes
x[i, :] -= v;
Otherwise, x and v must have the same rank. Computes
x[i, :] -= v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns x.
"""
return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)
def empty_like(x, init=None):
"""Returns a non-initialized tensor with the same shape and dtype as x.
Args:
x: A Tensor.
init: Initialize the returned tensor with the default value of
x.dtype(), if True. Otherwise, do not initialize. Defaults to
None.
Returns:
A tensor y, whose dtype and shape are the same as those of x.
y is guaranteed not to be an alias of x. Upon return, y may contain
arbitrary data.
"""
x = ops.convert_to_tensor(x)
return gen_array_ops.empty(array_ops.shape(x), x.dtype, init=init)
def inplace_update(x, i, v):
"""Applies an inplace update on input x at index i with value v.
Note that this function is not actually inplace - it allocates
a copy of x. The utility is not avoiding memory copies but rather
specifying a sparse update.
If i is None, x and v must be the same shape. Computes
y = x; y = v;
If i is a scalar, x has a rank 1 higher than v's. Computes
y = x; y[i, :] = v;
Otherwise, x and v must have the same rank. Computes
y = x; y[i, :] = v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns y, which is guaranteed not to be an alias of x.
"""
return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)
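# Minimal usage sketch (illustrative, not part of the original module); the
# tensor shape and values below are made up.
def _inplace_update_example():
  """Returns a copy of a zeros tensor whose row 1 is overwritten."""
  x = array_ops.zeros([3, 2])
  # x itself is untouched because the input is deep-copied before the
  # aliasing update is applied.
  return inplace_update(x, [1], [[1., 2.]])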
def inplace_add(x, i, v):
"""Applies an inplace add on input x at index i with value v.
Note that this function is not actually inplace - it allocates
a copy of x. The utility is not avoiding memory copies but rather
specifying a sparse update.
If i is None, x and v must be the same shape. Computes
y = x; y += v;
If i is a scalar, x has a rank 1 higher than v's. Computes
y = x; y[i, :] += v;
Otherwise, x and v must have the same rank. Computes
y = x; y[i, :] += v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns y, which is guaranteed not to be an alias of x.
"""
return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)
def inplace_sub(x, i, v):
"""Applies an inplace sub on input x at index i with value v.
Note that this function is not actually inplace - it allocates
a copy of x. The utility is not avoiding memory copies but rather
specifying a sparse update.
If i is None, x and v must be the same shape. Computes
y = x; y -= v;
If i is a scalar, x has a rank 1 higher than v's. Computes
y = x; y[i, :] -= v;
Otherwise, x and v must have the same rank. Computes
y = x; y[i, :] -= v;
Args:
x: A Tensor.
i: None, a scalar or a vector.
v: A Tensor.
Returns:
Returns y, which is guaranteed not to be an alias of x.
"""
return alias_inplace_sub(gen_array_ops.deep_copy(x), i, v)
empty = gen_array_ops.empty
| apache-2.0 | -4,495,402,618,330,529,300 | 26.735683 | 80 | 0.641995 | false |
fevxie/connector-magento | __unported__/magentoerpconnect_options_active/__openerp__.py | 11 | 1420 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Markus Schneider
# Copyright 2014 initOS GmbH & Co. KG
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Magento Connector Option Active Products',
'version': '1.0.0',
'category': 'Connector',
'depends': ['magentoerpconnect',
],
'external_dependencies': {},
'author': "initOS GmbH & Co. KG,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'website': 'http://www.odoo-magento-connector.com',
'description': """
""",
'images': [],
'demo': [],
'data': ['magento_model_view.xml',
],
'installable': False,
'application': False,
}
| agpl-3.0 | -6,390,143,954,784,893,000 | 34.5 | 78 | 0.59507 | false |
candy7393/VTK | ThirdParty/Twisted/twisted/python/roots.py | 68 | 7311 | # -*- test-case-name: twisted.test.test_roots -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Python Roots: an abstract hierarchy representation for Twisted.
Maintainer: Glyph Lefkowitz
"""
# System imports
import types
from twisted.python import reflect
class NotSupportedError(NotImplementedError):
"""
An exception meaning that the tree-manipulation operation
you're attempting to perform is not supported.
"""
class Request:
"""I am an abstract representation of a request for an entity.
I also function as the response. The request is responded to by calling
self.write(data) until there is no data left and then calling
self.finish().
"""
# This attribute should be set to the string name of the protocol being
# responded to (e.g. HTTP or FTP)
wireProtocol = None
def write(self, data):
"""Add some data to the response to this request.
"""
raise NotImplementedError("%s.write" % reflect.qual(self.__class__))
def finish(self):
"""The response to this request is finished; flush all data to the network stream.
"""
raise NotImplementedError("%s.finish" % reflect.qual(self.__class__))
class Entity:
"""I am a terminal object in a hierarchy, with no children.
I represent a null interface; certain non-instance objects (strings and
integers, notably) are Entities.
Methods on this class are suggested to be implemented, but are not
required, and will be emulated on a per-protocol basis for types which do
not handle them.
"""
def render(self, request):
"""
I produce a stream of bytes for the request, by calling request.write()
and request.finish().
"""
raise NotImplementedError("%s.render" % reflect.qual(self.__class__))
class Collection:
"""I represent a static collection of entities.
I contain methods designed to represent collections that can be dynamically
created.
"""
def __init__(self, entities=None):
"""Initialize me.
"""
if entities is not None:
self.entities = entities
else:
self.entities = {}
def getStaticEntity(self, name):
"""Get an entity that was added to me using putEntity.
This method will return 'None' if it fails.
"""
return self.entities.get(name)
def getDynamicEntity(self, name, request):
"""Subclass this to generate an entity on demand.
This method should return 'None' if it fails.
"""
def getEntity(self, name, request):
"""Retrieve an entity from me.
I will first attempt to retrieve an entity statically; static entities
will obscure dynamic ones. If that fails, I will retrieve the entity
dynamically.
If I cannot retrieve an entity, I will return 'None'.
"""
ent = self.getStaticEntity(name)
if ent is not None:
return ent
ent = self.getDynamicEntity(name, request)
if ent is not None:
return ent
return None
def putEntity(self, name, entity):
"""Store a static reference on 'name' for 'entity'.
Raises a KeyError if the operation fails.
"""
self.entities[name] = entity
def delEntity(self, name):
"""Remove a static reference for 'name'.
Raises a KeyError if the operation fails.
"""
del self.entities[name]
def storeEntity(self, name, request):
"""Store an entity for 'name', based on the content of 'request'.
"""
raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__))
def removeEntity(self, name, request):
"""Remove an entity for 'name', based on the content of 'request'.
"""
raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__))
def listStaticEntities(self):
"""Retrieve a list of all name, entity pairs that I store references to.
See getStaticEntity.
"""
return self.entities.items()
def listDynamicEntities(self, request):
"""A list of all name, entity that I can generate on demand.
See getDynamicEntity.
"""
return []
def listEntities(self, request):
"""Retrieve a list of all name, entity pairs I contain.
See getEntity.
"""
return self.listStaticEntities() + self.listDynamicEntities(request)
def listStaticNames(self):
"""Retrieve a list of the names of entities that I store references to.
See getStaticEntity.
"""
return self.entities.keys()
def listDynamicNames(self):
"""Retrieve a list of the names of entities that I store references to.
See getDynamicEntity.
"""
return []
def listNames(self, request):
"""Retrieve a list of all names for entities that I contain.
See getEntity.
"""
return self.listStaticNames()
class ConstraintViolation(Exception):
"""An exception raised when a constraint is violated.
"""
class Constrained(Collection):
"""A collection that has constraints on its names and/or entities."""
def nameConstraint(self, name):
"""A method that determines whether an entity may be added to me with a given name.
If the constraint is satisfied, return 1; if the constraint is not
satisfied, either return 0 or raise a descriptive ConstraintViolation.
"""
return 1
def entityConstraint(self, entity):
"""A method that determines whether an entity may be added to me.
If the constraint is satisfied, return 1; if the constraint is not
satisfied, either return 0 or raise a descriptive ConstraintViolation.
"""
return 1
def reallyPutEntity(self, name, entity):
Collection.putEntity(self, name, entity)
def putEntity(self, name, entity):
"""Store an entity if it meets both constraints.
Otherwise raise a ConstraintViolation.
"""
if self.nameConstraint(name):
if self.entityConstraint(entity):
self.reallyPutEntity(name, entity)
else:
raise ConstraintViolation("Entity constraint violated.")
else:
raise ConstraintViolation("Name constraint violated.")
class Locked(Constrained):
"""A collection that can be locked from adding entities."""
locked = 0
def lock(self):
self.locked = 1
def entityConstraint(self, entity):
return not self.locked
class Homogenous(Constrained):
"""A homogenous collection of entities.
I will only contain entities that are an instance of the class or type
specified by my 'entityType' attribute.
"""
entityType = types.InstanceType
def entityConstraint(self, entity):
if isinstance(entity, self.entityType):
return 1
else:
raise ConstraintViolation("%s of incorrect type (%s)" %
(entity, self.entityType))
def getNameType(self):
return "Name"
def getEntityType(self):
return self.entityType.__name__
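# Illustrative sketch (not part of the original module): a Homogenous
# subclass whose members must be Entity instances; the class name
# "EntityCollection" is hypothetical.  putEntity() raises
# ConstraintViolation for anything that is not an Entity.
class EntityCollection(Homogenous):
    entityType = Entity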
| bsd-3-clause | 2,673,293,966,575,226,000 | 28.479839 | 91 | 0.63425 | false |
amenonsen/ansible | lib/ansible/modules/cloud/openstack/os_coe_cluster.py | 31 | 8797 | #!/usr/bin/python
# Copyright (c) 2018 Catalyst IT Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_coe_cluster
short_description: Add/Remove COE cluster from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.8"
author: "Feilong Wang (@flwang)"
description:
- Add or Remove COE cluster from the OpenStack Container Infra service.
options:
availability_zone:
description:
- Ignored. Present for backwards compatibility
cluster_template_id:
description:
- The template ID of cluster template.
required: true
discovery_url:
description:
- Url used for cluster node discovery
docker_volume_size:
description:
- The size in GB of the docker volume
flavor_id:
description:
      - The flavor of the minion node for this cluster
keypair:
description:
- Name of the keypair to use.
labels:
description:
- One or more key/value pairs
master_flavor_id:
description:
      - The flavor of the master node for this cluster
master_count:
description:
- The number of master nodes for this cluster
default: 1
name:
description:
      - Name that has to be given to the cluster
required: true
node_count:
description:
- The number of nodes for this cluster
default: 1
state:
description:
- Indicate desired state of the resource.
choices: [present, absent]
default: present
timeout:
description:
- Timeout for creating the cluster in minutes. Default to 60 mins
if not set
default: 60
requirements: ["openstacksdk"]
'''
RETURN = '''
id:
description: The cluster UUID.
returned: On success when I(state) is 'present'
type: str
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
cluster:
description: Dictionary describing the cluster.
returned: On success when I(state) is 'present'
type: complex
contains:
api_address:
description:
- Api address of cluster master node
type: string
sample: https://172.24.4.30:6443
cluster_template_id:
description: The cluster_template UUID
type: string
sample: '7b1418c8-cea8-48fc-995d-52b66af9a9aa'
coe_version:
description:
- Version of the COE software currently running in this cluster
type: string
sample: v1.11.1
container_version:
description:
        - Version of the container software, e.g. the docker version.
type: string
sample: 1.12.6
created_at:
description:
- The time in UTC at which the cluster is created
type: datetime
sample: 2018-08-16T10:29:45+00:00
create_timeout:
description:
- Timeout for creating the cluster in minutes. Default to 60 if
not set.
type: int
sample: 60
discovery_url:
description:
- Url used for cluster node discovery
type: string
sample: https://discovery.etcd.io/a42ee38e7113f31f4d6324f24367aae5
faults:
description:
- Fault info collected from the Heat resources of this cluster
type: dict
sample: {'0': 'ResourceInError: resources[0].resources...'}
flavor_id:
description:
- The flavor of the minion node for this cluster
type: string
sample: c1.c1r1
keypair:
description:
- Name of the keypair to use.
type: string
sample: mykey
labels:
description: One or more key/value pairs
type: dict
sample: {'key1': 'value1', 'key2': 'value2'}
master_addresses:
description:
- IP addresses of cluster master nodes
type: list
sample: ['172.24.4.5']
master_count:
description:
- The number of master nodes for this cluster.
type: int
sample: 1
master_flavor_id:
description:
- The flavor of the master node for this cluster
type: string
sample: c1.c1r1
name:
description:
- Name that has to be given to the cluster
type: string
sample: k8scluster
node_addresses:
description:
- IP addresses of cluster slave nodes
type: list
sample: ['172.24.4.8']
node_count:
description:
        - The number of nodes for this cluster.
type: int
sample: 1
stack_id:
description:
- Stack id of the Heat stack
type: string
sample: '07767ec6-85f5-44cb-bd63-242a8e7f0d9d'
status:
description: Status of the cluster from the heat stack
type: string
      sample: 'CREATE_COMPLETE'
status_reason:
description:
- Status reason of the cluster from the heat stack
type: string
sample: 'Stack CREATE completed successfully'
updated_at:
description:
- The time in UTC at which the cluster is updated
type: datetime
sample: '2018-08-16T10:39:25+00:00'
uuid:
description:
- Unique UUID for this cluster
type: string
sample: '86246a4d-a16c-4a58-9e96ad7719fe0f9d'
'''
EXAMPLES = '''
# Create a new Kubernetes cluster
- os_coe_cluster:
name: k8s
cluster_template_id: k8s-ha
keypair: mykey
master_count: 3
node_count: 5
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _parse_labels(labels):
if isinstance(labels, str):
labels_dict = {}
for kv_str in labels.split(","):
k, v = kv_str.split("=")
labels_dict[k] = v
return labels_dict
if not labels:
return {}
return labels
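# Worked example (illustrative; the label names below are made up): the
# string form "kube_tag=v1.11.1,availability_zone=nova" is parsed by
# _parse_labels() into {'kube_tag': 'v1.11.1', 'availability_zone': 'nova'},
# while a dict is returned unchanged and an empty value becomes {}.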
def main():
argument_spec = openstack_full_argument_spec(
cluster_template_id=dict(required=True),
discovery_url=dict(default=None),
docker_volume_size=dict(type='int'),
flavor_id=dict(default=None),
keypair=dict(default=None),
labels=dict(default=None, type='raw'),
master_count=dict(type='int', default=1),
master_flavor_id=dict(default=None),
name=dict(required=True),
node_count=dict(type='int', default=1),
state=dict(default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=60),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
params = module.params.copy()
state = module.params['state']
name = module.params['name']
cluster_template_id = module.params['cluster_template_id']
kwargs = dict(
discovery_url=module.params['discovery_url'],
docker_volume_size=module.params['docker_volume_size'],
flavor_id=module.params['flavor_id'],
keypair=module.params['keypair'],
labels=_parse_labels(params['labels']),
master_count=module.params['master_count'],
master_flavor_id=module.params['master_flavor_id'],
node_count=module.params['node_count'],
create_timeout=module.params['timeout'],
)
sdk, cloud = openstack_cloud_from_module(module)
try:
changed = False
cluster = cloud.get_coe_cluster(name_or_id=name, filters={'cluster_template_id': cluster_template_id})
if state == 'present':
if not cluster:
cluster = cloud.create_coe_cluster(name, cluster_template_id=cluster_template_id, **kwargs)
changed = True
else:
changed = False
module.exit_json(changed=changed, cluster=cluster, id=cluster['uuid'])
elif state == 'absent':
if not cluster:
module.exit_json(changed=False)
else:
cloud.delete_coe_cluster(name)
module.exit_json(changed=True)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == "__main__":
main()
| gpl-3.0 | 4,662,569,939,266,239,000 | 30.195035 | 125 | 0.596908 | false |
laijingtao/landlab | landlab/components/flexure/funcs.py | 6 | 7655 | #!/usr/bin/env python
import numpy as np
import scipy.special
from multiprocessing import Pool
_POISSON = .25
_N_PROCS = 4
def get_flexure_parameter(h, E, n_dim, gamma_mantle=33000.):
"""
Calculate the flexure parameter based on some physical constants. *h* is
the Effective elastic thickness of Earth's crust (m), *E* is Young's
Modulus, and *n_dim* is the number of spatial dimensions for which the
    flexure parameter is used. The number of dimensions must be either 1 or 2.
Examples
--------
>>> from __future__ import print_function
>>> from landlab.components.flexure import get_flexure_parameter
>>> eet = 65000.
>>> youngs = 7e10
>>> alpha = get_flexure_parameter(eet, youngs, 1)
>>> print('%.3f' % round(alpha, 3))
119965.926
>>> alpha = get_flexure_parameter(eet, youngs, 2)
>>> print('%.2f' % alpha)
84828.72
"""
D = E * pow(h, 3) / 12. / (1. - pow(_POISSON, 2))
assert(n_dim == 1 or n_dim == 2)
if n_dim == 2:
alpha = pow(D / gamma_mantle, .25)
else:
alpha = pow(4. * D / gamma_mantle, .25)
return alpha
def _calculate_distances(locs, coords):
if isinstance(locs[0], (float, int)):
return np.sqrt(pow(coords[0] - locs[0], 2) +
pow(coords[1] - locs[1], 2))
else:
r = pow(coords[0][:, np.newaxis] - locs[0], 2)
r += pow(coords[1][:, np.newaxis] - locs[1], 2)
return np.sqrt(r, out=r)
def _calculate_deflections(load, locs, coords, alpha, out=None,
gamma_mantle=33000.):
c = - load / (2. * np.pi * gamma_mantle * pow(alpha, 2.))
r = _calculate_distances(locs, coords) / alpha
if isinstance(c, (float, int)):
return np.multiply(scipy.special.kei(r), c, out=out)
else:
scipy.special.kei(r, out=r)
np.multiply(r, c[np.newaxis, :], out=r)
return np.sum(r, axis=1, out=out)
def subside_point_load(load, loc, coords, params=None, out=None):
"""Calculate deflection at points due a point load.
Calculate deflections on a grid, defined by the points in the *coords*
tuple, due to a point load of magnitude *load* applied at *loc*.
*x* and *y* are the x and y coordinates of each node of the solution
grid (in meters). The scalars *eet* and *youngs* define the crustal
properties.
Parameters
----------
load : float
Magnitude of the point load.
loc : float or tuple
Location of the load as either a scalar or as (*x*, *y*)
coords : ndarray
Array of points to calculate deflections at
params : dict-like
Physical parameters used for deflection calculation. Valid keys are
- *eet*: Effective elastic thickness
- *youngs*: Young's modulus
out : ndarray, optional
Array to put deflections into.
Returns
-------
out : ndarray
Array of deflections.
Examples
--------
>>> from landlab.components.flexure import subside_point_load
>>> params = dict(eet=65000., youngs=7e10)
>>> load = 1e9
    Define a uniform rectilinear grid.
>>> x = np.arange(0, 10000, 100.)
>>> y = np.arange(0, 5000, 100.)
>>> (x, y) = np.meshgrid(x, y)
>>> x.shape = (x.size, )
>>> y.shape = (y.size, )
Calculate deflections due to a load applied at position (5000., 2500.).
>>> import six
>>> x = np.arange(0, 10000, 1000.)
>>> y = np.arange(0, 5000, 1000.)
>>> (x, y) = np.meshgrid(x, y)
>>> x.shape = (x.size, )
>>> y.shape = (y.size, )
>>> dz = subside_point_load(load, (5000., 2500.), (x, y), params=params)
>>> print('%.5g' % round(dz.sum(), 9))
2.6267e-05
>>> six.print_(round(dz.min(), 9))
5.24e-07
>>> six.print_(round(dz.max(), 9))
5.26e-07
>>> dz = subside_point_load((1e9, 1e9), ((5000., 5000.), (2500., 2500.)),
... (x, y), params=params)
>>> six.print_(round(dz.min(), 9) / 2.)
5.235e-07
>>> six.print_(round(dz.max(), 9) / 2.)
5.265e-07
"""
params = params or dict(eet=6500., youngs=7.e10)
eet, youngs = params['eet'], params['youngs']
gamma_mantle = params.get('gamma_mantle', 33000.)
assert(len(loc) in [1, 2])
assert(len(coords) == len(loc))
assert(len(coords[0].shape) == 1)
if not isinstance(load, (int, float, np.ndarray)):
load = np.array(load)
if out is None:
out = np.empty(coords[0].size, dtype=np.float)
alpha = get_flexure_parameter(eet, youngs, len(loc),
gamma_mantle=gamma_mantle)
if len(loc) == 2:
_calculate_deflections(load, loc, coords, alpha, out=out,
gamma_mantle=gamma_mantle)
else:
c = load / (2. * alpha * gamma_mantle)
r = abs(coords[0] - loc[0]) / alpha
out[:] = c * np.exp(-r) * (np.cos(r) + np.sin(r))
return out
def subside_point_loads(loads, locs, coords, params=None, deflection=None,
n_procs=1):
"""Calculate deflection at points due multiple point loads.
Calculate lithospheric deflections due to *loads* at coordinates
specified by the *locs* tuple. *coords* is a tuple that gives the
coordinates of each point where deflections are calculated; *locs* is
positions of the applied loads. Since this function calculates the 1D
or 2D flexure equation, *coords* and *locs* must have either one or two
elements.
Parameters
----------
    loads : array_like
        Magnitude of the point loads.
    locs : tuple of (loc_x, loc_y)
        Load locations.
coords : ndarray
Array of points to calculate deflections at
params : dict-like
Physical parameters used for deflection calculation. Valid keys are
- *eet*: Effective elastic thickness
- *youngs*: Young's modulus
- *gamma_mantle*: Specific weight of the mantle
    deflection : ndarray, optional
Array to put deflections into.
Returns
-------
out : ndarray
Array of deflections.
"""
params = params or dict(eet=6500., youngs=7.e10)
eet, youngs = params['eet'], params['youngs']
gamma_mantle = params.get('gamma_mantle', 33000.)
if deflection is None:
deflection = np.empty(coords[0].size, dtype=np.float)
assert(len(coords) in [1, 2])
assert(len(locs) == len(coords))
assert(loads.size == locs[0].size)
if n_procs > 1:
_subside_in_parallel(deflection, loads, locs, coords, eet, youngs,
gamma_mantle, n_procs=n_procs)
else:
for index in loads.nonzero()[0]:
loc = [dim.flat[index] for dim in locs]
            deflection += subside_point_load(
                loads.flat[index], loc, coords,
                params=dict(eet=eet, youngs=youngs,
                            gamma_mantle=gamma_mantle))
return deflection
def _subside_point_load_helper(args):
return subside_point_load(*args)
def _subside_in_parallel(dz, loads, locs, coords, eet, youngs, gamma_mantle,
n_procs=4):
args = []
for index in loads.nonzero()[0]:
loc = (locs[0].flat[index], locs[1].flat[index])
        args.append((loads.flat[index], loc, coords,
                     dict(eet=eet, youngs=youngs,
                          gamma_mantle=gamma_mantle)))
pool = Pool(processes=n_procs)
results = pool.map(_subside_point_load_helper, args)
for result in results:
try:
dz += result
except ValueError:
result.shape = dz.shape
dz += result
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit | 2,800,048,137,220,816,400 | 29.991903 | 77 | 0.574135 | false |
numenta-ci/nupic | scripts/run_opf_experiment.py | 37 | 1210 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This script is a command-line client of Online Prediction Framework (OPF).
It executes a single experiment.
"""
from nupic.frameworks.opf.experiment_runner import main
if __name__ == "__main__":
main()
| agpl-3.0 | 1,228,874,628,268,945,400 | 36.8125 | 77 | 0.661157 | false |
mateon1/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/client_for_testing.py | 451 | 39706 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing.
This module contains helper methods for performing handshake, frame
sending/receiving as a WebSocket client.
This is code for testing mod_pywebsocket. Keep this code independent from
mod_pywebsocket. Don't import e.g. Stream class for generating frame for
testing. Using util.hexify, etc. that are not related to protocol processing
is allowed.
Note:
This code is far from robust, e.g., we cut corners in handshake.
"""
import base64
import errno
import logging
import os
import random
import re
import socket
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
DEFAULT_PORT = 80
DEFAULT_SECURE_PORT = 443
# Opcodes introduced in IETF HyBi 01 for the new framing format
OPCODE_CONTINUATION = 0x0
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
# Strings used for handshake
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Status codes
STATUS_NORMAL_CLOSURE = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA = 1003
STATUS_NO_STATUS_RECEIVED = 1005
STATUS_ABNORMAL_CLOSURE = 1006
STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_MANDATORY_EXT = 1010
STATUS_INTERNAL_ENDPOINT_ERROR = 1011
STATUS_TLS_HANDSHAKE = 1015
# Extension tokens
_DEFLATE_FRAME_EXTENSION = 'deflate-frame'
# TODO(bashi): Update after mux implementation finished.
_MUX_EXTENSION = 'mux_DO_NOT_USE'
_PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
def _method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _sec_origin_header(origin):
return 'Sec-WebSocket-Origin: %s\r\n' % origin.lower()
def _origin_header(origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return 'Origin: %s\r\n' % origin.lower()
def _format_host_header(host, port, secure):
# 4.1 9. Let /hostport/ be an empty string.
# 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
# /hostport/
hostport = host.lower()
# 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
# is true, and /port/ is not 443, then append a U+003A COLON character
# (:) followed by the value of /port/, expressed as a base-ten integer,
# to /hostport/
if ((not secure and port != DEFAULT_PORT) or
(secure and port != DEFAULT_SECURE_PORT)):
hostport += ':' + str(port)
# 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
# character, and /hostport/, to /fields/.
return 'Host: %s\r\n' % hostport
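# Worked examples (illustrative): with the rules above,
# _format_host_header('Example.COM', 80, False) yields 'Host: example.com\r\n'
# (the default port is omitted), while _format_host_header('example.com',
# 8880, False) yields 'Host: example.com:8880\r\n'.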
# TODO(tyoshino): Define a base class and move these shared methods to that.
def receive_bytes(socket, length):
received_bytes = []
remaining = length
while remaining > 0:
new_received_bytes = socket.recv(remaining)
if not new_received_bytes:
raise Exception(
'Connection closed before receiving requested length '
'(requested %d bytes but received only %d bytes)' %
(length, length - remaining))
received_bytes.append(new_received_bytes)
remaining -= len(new_received_bytes)
return ''.join(received_bytes)
# TODO(tyoshino): Now the WebSocketHandshake class diverts these methods. We
# should move to HTTP parser as specified in RFC 6455. For HyBi 00 and
# Hixie 75, pack these methods as some parser class.
def _read_fields(socket):
# 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
fields = {}
while True:
# 4.1 33. let /name/ and /value/ be empty byte arrays
name = ''
value = ''
# 4.1 34. read /name/
name = _read_name(socket)
if name is None:
break
# 4.1 35. read spaces
# TODO(tyoshino): Skip only one space as described in the spec.
ch = _skip_spaces(socket)
# 4.1 36. read /value/
value = _read_value(socket, ch)
# 4.1 37. read a byte from the server
ch = receive_bytes(socket, 1)
if ch != '\n': # 0x0A
raise Exception(
'Expected LF but found %r while reading value %r for header '
'%r' % (ch, name, value))
# 4.1 38. append an entry to the /fields/ list that has the name
# given by the string obtained by interpreting the /name/ byte
# array as a UTF-8 stream and the value given by the string
# obtained by interpreting the /value/ byte array as a UTF-8 byte
# stream.
fields.setdefault(name, []).append(value)
# 4.1 39. return to the "Field" step above
return fields
def _read_name(socket):
# 4.1 33. let /name/ be empty byte arrays
name = ''
while True:
# 4.1 34. read a byte from the server
ch = receive_bytes(socket, 1)
if ch == '\r': # 0x0D
return None
elif ch == '\n': # 0x0A
raise Exception(
'Unexpected LF when reading header name %r' % name)
elif ch == ':': # 0x3A
return name
        elif ch >= 'A' and ch <= 'Z':  # range 0x41 to 0x5A
ch = chr(ord(ch) + 0x20)
name += ch
else:
name += ch
def _skip_spaces(socket):
# 4.1 35. read a byte from the server
while True:
ch = receive_bytes(socket, 1)
if ch == ' ': # 0x20
continue
return ch
def _read_value(socket, ch):
# 4.1 33. let /value/ be empty byte arrays
value = ''
# 4.1 36. read a byte from server.
while True:
if ch == '\r': # 0x0D
return value
elif ch == '\n': # 0x0A
raise Exception(
'Unexpected LF when reading header value %r' % value)
else:
value += ch
ch = receive_bytes(socket, 1)
def read_frame_header(socket):
received = receive_bytes(socket, 2)
first_byte = ord(received[0])
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
second_byte = ord(received[1])
mask = (second_byte >> 7) & 1
payload_length = second_byte & 0x7f
if mask != 0:
raise Exception(
'Mask bit must be 0 for frames coming from server')
if payload_length == 127:
extended_payload_length = receive_bytes(socket, 8)
payload_length = struct.unpack(
'!Q', extended_payload_length)[0]
if payload_length > 0x7FFFFFFFFFFFFFFF:
raise Exception('Extended payload length >= 2^63')
elif payload_length == 126:
extended_payload_length = receive_bytes(socket, 2)
payload_length = struct.unpack(
'!H', extended_payload_length)[0]
return fin, rsv1, rsv2, rsv3, opcode, payload_length
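# Worked example (illustrative): if the first two bytes received from the
# server are 0x81 0x05, the bit operations above yield fin=1,
# rsv1=rsv2=rsv3=0, opcode=0x1 (text) and payload_length=5, i.e. an
# unfragmented 5-byte text frame with no extended length bytes to read.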
class _TLSSocket(object):
"""Wrapper for a TLS connection."""
def __init__(self, raw_socket):
self._ssl = socket.ssl(raw_socket)
def send(self, bytes):
return self._ssl.write(bytes)
def recv(self, size=-1):
return self._ssl.read(size)
def close(self):
# Nothing to do.
pass
class HttpStatusException(Exception):
"""This exception will be raised when unexpected http status code was
received as a result of handshake.
"""
def __init__(self, name, status):
super(HttpStatusException, self).__init__(name)
self.status = status
class WebSocketHandshake(object):
"""Opening handshake processor for the WebSocket protocol (RFC 6455)."""
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
def handshake(self, socket):
"""Handshake WebSocket.
Raises:
Exception: handshake failed.
"""
self._socket = socket
request_line = _method_line(self._options.resource)
self._logger.debug('Opening handshake Request-Line: %r', request_line)
self._socket.sendall(request_line)
fields = []
fields.append(_UPGRADE_HEADER)
fields.append(_CONNECTION_HEADER)
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
        if self._options.version == 8:
fields.append(_sec_origin_header(self._options.origin))
else:
fields.append(_origin_header(self._options.origin))
original_key = os.urandom(16)
key = base64.b64encode(original_key)
self._logger.debug(
'Sec-WebSocket-Key: %s (%s)', key, util.hexify(original_key))
fields.append('Sec-WebSocket-Key: %s\r\n' % key)
fields.append('Sec-WebSocket-Version: %d\r\n' % self._options.version)
# Setting up extensions.
if len(self._options.extensions) > 0:
fields.append('Sec-WebSocket-Extensions: %s\r\n' %
', '.join(self._options.extensions))
self._logger.debug('Opening handshake request headers: %r', fields)
for field in fields:
self._socket.sendall(field)
self._socket.sendall('\r\n')
self._logger.info('Sent opening handshake request')
field = ''
while True:
ch = receive_bytes(self._socket, 1)
field += ch
if ch == '\n':
break
self._logger.debug('Opening handshake Response-Line: %r', field)
if len(field) < 7 or not field.endswith('\r\n'):
raise Exception('Wrong status line: %r' % field)
m = re.match('[^ ]* ([^ ]*) .*', field)
if m is None:
raise Exception(
'No HTTP status code found in status line: %r' % field)
code = m.group(1)
if not re.match('[0-9][0-9][0-9]', code):
raise Exception(
'HTTP status code %r is not three digit in status line: %r' %
(code, field))
if code != '101':
raise HttpStatusException(
'Expected HTTP status code 101 but found %r in status line: '
'%r' % (code, field), int(code))
fields = _read_fields(self._socket)
ch = receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise Exception('Expected LF but found: %r' % ch)
self._logger.debug('Opening handshake response headers: %r', fields)
# Check /fields/
if len(fields['upgrade']) != 1:
raise Exception(
'Multiple Upgrade headers found: %s' % fields['upgrade'])
if len(fields['connection']) != 1:
raise Exception(
'Multiple Connection headers found: %s' % fields['connection'])
if fields['upgrade'][0] != 'websocket':
raise Exception(
'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
if fields['connection'][0].lower() != 'upgrade':
raise Exception(
'Unexpected Connection header value: %s' %
fields['connection'][0])
if len(fields['sec-websocket-accept']) != 1:
raise Exception(
'Multiple Sec-WebSocket-Accept headers found: %s' %
fields['sec-websocket-accept'])
accept = fields['sec-websocket-accept'][0]
# Validate
try:
decoded_accept = base64.b64decode(accept)
except TypeError, e:
raise HandshakeException(
'Illegal value for header Sec-WebSocket-Accept: ' + accept)
if len(decoded_accept) != 20:
raise HandshakeException(
'Decoded value of Sec-WebSocket-Accept is not 20-byte long')
self._logger.debug('Actual Sec-WebSocket-Accept: %r (%s)',
accept, util.hexify(decoded_accept))
original_expected_accept = util.sha1_hash(
key + WEBSOCKET_ACCEPT_UUID).digest()
expected_accept = base64.b64encode(original_expected_accept)
self._logger.debug('Expected Sec-WebSocket-Accept: %r (%s)',
expected_accept,
util.hexify(original_expected_accept))
if accept != expected_accept:
raise Exception(
'Invalid Sec-WebSocket-Accept header: %r (expected) != %r '
                '(actual)' % (expected_accept, accept))
server_extensions_header = fields.get('sec-websocket-extensions')
accepted_extensions = []
if server_extensions_header is not None:
accepted_extensions = common.parse_extensions(
', '.join(server_extensions_header))
# Scan accepted extension list to check if there is any unrecognized
# extensions or extensions we didn't request in it. Then, for
# extensions we request, parse them and store parameters. They will be
# used later by each extension.
deflate_frame_accepted = False
mux_accepted = False
for extension in accepted_extensions:
if extension.name() == _DEFLATE_FRAME_EXTENSION:
if self._options.use_deflate_frame:
deflate_frame_accepted = True
continue
if extension.name() == _MUX_EXTENSION:
if self._options.use_mux:
mux_accepted = True
continue
if extension.name() == _PERMESSAGE_DEFLATE_EXTENSION:
checker = self._options.check_permessage_deflate
if checker:
checker(extension)
continue
raise Exception(
'Received unrecognized extension: %s' % extension.name())
# Let all extensions check the response for extension request.
if (self._options.use_deflate_frame and
not deflate_frame_accepted):
raise Exception('%s extension not accepted' %
_DEFLATE_FRAME_EXTENSION)
if self._options.use_mux and not mux_accepted:
raise Exception('%s extension not accepted' % _MUX_EXTENSION)
class WebSocketHybi00Handshake(object):
"""Opening handshake processor for the WebSocket protocol version HyBi 00.
"""
def __init__(self, options, draft_field):
self._logger = util.get_class_logger(self)
self._options = options
self._draft_field = draft_field
def handshake(self, socket):
"""Handshake WebSocket.
Raises:
Exception: handshake failed.
"""
self._socket = socket
# 4.1 5. send request line.
request_line = _method_line(self._options.resource)
self._logger.debug('Opening handshake Request-Line: %r', request_line)
self._socket.sendall(request_line)
# 4.1 6. Let /fields/ be an empty list of strings.
fields = []
# 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
fields.append(_UPGRADE_HEADER_HIXIE75)
# 4.1 8. Add the string "Connection: Upgrade" to /fields/.
fields.append(_CONNECTION_HEADER)
# 4.1 9-12. Add Host: field to /fields/.
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
# 4.1 13. Add Origin: field to /fields/.
fields.append(_origin_header(self._options.origin))
# TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
# TODO: 4.1 15 Add cookie headers to /fields/.
# 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
self._number1, key1 = self._generate_sec_websocket_key()
self._logger.debug('Number1: %d', self._number1)
fields.append('Sec-WebSocket-Key1: %s\r\n' % key1)
self._number2, key2 = self._generate_sec_websocket_key()
        self._logger.debug('Number2: %d', self._number2)
fields.append('Sec-WebSocket-Key2: %s\r\n' % key2)
fields.append('Sec-WebSocket-Draft: %s\r\n' % self._draft_field)
# 4.1 24. For each string in /fields/, in a random order: send the
# string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
# RETURN U+000A LINE FEED character pair (CRLF).
random.shuffle(fields)
self._logger.debug('Opening handshake request headers: %r', fields)
for field in fields:
self._socket.sendall(field)
# 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
# character pair (CRLF).
self._socket.sendall('\r\n')
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
self._key3 = self._generate_key3()
# 4.1 27. send /key3/ to the server.
self._socket.sendall(self._key3)
self._logger.debug(
'Key3: %r (%s)', self._key3, util.hexify(self._key3))
self._logger.info('Sent opening handshake request')
# 4.1 28. Read bytes from the server until either the connection
# closes, or a 0x0A byte is read. let /field/ be these bytes, including
# the 0x0A bytes.
field = ''
while True:
ch = receive_bytes(self._socket, 1)
field += ch
if ch == '\n':
break
self._logger.debug('Opening handshake Response-Line: %r', field)
# if /field/ is not at least seven bytes long, or if the last
# two bytes aren't 0x0D and 0x0A respectively, or if it does not
# contain at least two 0x20 bytes, then fail the WebSocket connection
# and abort these steps.
if len(field) < 7 or not field.endswith('\r\n'):
raise Exception('Wrong status line: %r' % field)
m = re.match('[^ ]* ([^ ]*) .*', field)
if m is None:
raise Exception('No code found in status line: %r' % field)
# 4.1 29. let /code/ be the substring of /field/ that starts from the
# byte after the first 0x20 byte, and ends with the byte before the
# second 0x20 byte.
code = m.group(1)
# 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
        # /code/ are not in the range 0x30 to 0x39, then fail the WebSocket
# connection and abort these steps.
if not re.match('[0-9][0-9][0-9]', code):
raise Exception(
'HTTP status code %r is not three digit in status line: %r' %
(code, field))
# 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
# next step.
if code != '101':
raise HttpStatusException(
'Expected HTTP status code 101 but found %r in status line: '
'%r' % (code, field), int(code))
# 4.1 32-39. read fields into /fields/
fields = _read_fields(self._socket)
self._logger.debug('Opening handshake response headers: %r', fields)
# 4.1 40. _Fields processing_
# read a byte from server
ch = receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise Exception('Expected LF but found %r' % ch)
# 4.1 41. check /fields/
if len(fields['upgrade']) != 1:
raise Exception(
'Multiple Upgrade headers found: %s' % fields['upgrade'])
if len(fields['connection']) != 1:
raise Exception(
'Multiple Connection headers found: %s' % fields['connection'])
if len(fields['sec-websocket-origin']) != 1:
raise Exception(
'Multiple Sec-WebSocket-Origin headers found: %s' %
                fields['sec-websocket-origin'])
if len(fields['sec-websocket-location']) != 1:
raise Exception(
'Multiple Sec-WebSocket-Location headers found: %s' %
                fields['sec-websocket-location'])
# TODO(ukai): protocol
# if the entry's name is "upgrade"
# if the value is not exactly equal to the string "WebSocket",
# then fail the WebSocket connection and abort these steps.
if fields['upgrade'][0] != 'WebSocket':
raise Exception(
'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
# if the entry's name is "connection"
# if the value, converted to ASCII lowercase, is not exactly equal
# to the string "upgrade", then fail the WebSocket connection and
# abort these steps.
if fields['connection'][0].lower() != 'upgrade':
raise Exception(
'Unexpected Connection header value: %s' %
fields['connection'][0])
# TODO(ukai): check origin, location, cookie, ..
# 4.1 42. let /challenge/ be the concatenation of /number_1/,
# expressed as a big endian 32 bit integer, /number_2/, expressed
# as big endian 32 bit integer, and the eight bytes of /key_3/ in the
# order they were sent on the wire.
challenge = struct.pack('!I', self._number1)
challenge += struct.pack('!I', self._number2)
challenge += self._key3
self._logger.debug(
'Challenge: %r (%s)', challenge, util.hexify(challenge))
# 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
# big-endian 128 bit string.
expected = util.md5_hash(challenge).digest()
self._logger.debug(
'Expected challenge response: %r (%s)',
expected, util.hexify(expected))
# 4.1 44. read sixteen bytes from the server.
# let /reply/ be those bytes.
reply = receive_bytes(self._socket, 16)
self._logger.debug(
'Actual challenge response: %r (%s)', reply, util.hexify(reply))
# 4.1 45. if /reply/ does not exactly equal /expected/, then fail
# the WebSocket connection and abort these steps.
if expected != reply:
raise Exception(
'Bad challenge response: %r (expected) != %r (actual)' %
(expected, reply))
# 4.1 46. The *WebSocket connection is established*.
def _generate_sec_websocket_key(self):
# 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
spaces = random.randint(1, 12)
# 4.1 17. let /max_n/ be the largest integer not greater than
# 4,294,967,295 divided by /spaces_n/.
maxnum = 4294967295 / spaces
# 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
# inclusive.
number = random.randint(0, maxnum)
# 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
# /spaces_n/ together.
product = number * spaces
# 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
# in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
# U+0039 DIGIT NINE (9).
key = str(product)
# 4.1 21. insert between one and twelve random characters from the
# range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
# positions.
available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
n = random.randint(1, 12)
for _ in xrange(n):
ch = random.choice(available_chars)
pos = random.randint(0, len(key))
key = key[0:pos] + chr(ch) + key[pos:]
# 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
# random positions other than start or end of the string.
for _ in xrange(spaces):
pos = random.randint(1, len(key) - 1)
key = key[0:pos] + ' ' + key[pos:]
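        # Illustrative example (hypothetical values): spaces=3 and
        # number=12345 give product=37035; after the noise characters and
        # spaces are inserted, the server recovers 12345 by stripping every
        # non-digit from the key and dividing by the number of spaces.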
return number, key
def _generate_key3(self):
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
class WebSocketHixie75Handshake(object):
"""WebSocket handshake processor for IETF Hixie 75."""
_EXPECTED_RESPONSE = (
'HTTP/1.1 101 Web Socket Protocol Handshake\r\n' +
_UPGRADE_HEADER_HIXIE75 +
_CONNECTION_HEADER)
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
def _skip_headers(self):
terminator = '\r\n\r\n'
pos = 0
while pos < len(terminator):
received = receive_bytes(self._socket, 1)
if received == terminator[pos]:
pos += 1
elif received == terminator[0]:
pos = 1
else:
pos = 0
def handshake(self, socket):
self._socket = socket
request_line = _method_line(self._options.resource)
self._logger.debug('Opening handshake Request-Line: %r', request_line)
self._socket.sendall(request_line)
headers = _UPGRADE_HEADER_HIXIE75 + _CONNECTION_HEADER
headers += _format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls)
headers += _origin_header(self._options.origin)
self._logger.debug('Opening handshake request headers: %r', headers)
self._socket.sendall(headers)
self._socket.sendall('\r\n')
self._logger.info('Sent opening handshake request')
for expected_char in WebSocketHixie75Handshake._EXPECTED_RESPONSE:
received = receive_bytes(self._socket, 1)
if expected_char != received:
raise Exception('Handshake failure')
# We cut corners and skip other headers.
self._skip_headers()
class WebSocketStream(object):
"""Frame processor for the WebSocket protocol (RFC 6455)."""
def __init__(self, socket, handshake):
self._handshake = handshake
self._socket = socket
# Filters applied to application data part of data frames.
self._outgoing_frame_filter = None
self._incoming_frame_filter = None
if self._handshake._options.use_deflate_frame:
self._outgoing_frame_filter = (
util._RFC1979Deflater(None, False))
self._incoming_frame_filter = util._RFC1979Inflater()
self._fragmented = False
def _mask_hybi(self, s):
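        # RFC 6455 requires every client-to-server frame to be masked: the
        # 4-byte masking nonce is sent first and each payload byte is XORed
        # with the nonce byte at position (byte index % 4).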
# TODO(tyoshino): os.urandom does open/read/close for every call. If
# performance matters, change this to some library call that generates
# cryptographically secure pseudo random number sequence.
masking_nonce = os.urandom(4)
result = [masking_nonce]
count = 0
for c in s:
result.append(chr(ord(c) ^ ord(masking_nonce[count])))
count = (count + 1) % len(masking_nonce)
return ''.join(result)
def send_frame_of_arbitrary_bytes(self, header, body):
self._socket.sendall(header + self._mask_hybi(body))
def send_data(self, payload, frame_type, end=True, mask=True,
rsv1=0, rsv2=0, rsv3=0):
if self._outgoing_frame_filter is not None:
payload = self._outgoing_frame_filter.filter(payload)
if self._fragmented:
opcode = OPCODE_CONTINUATION
else:
opcode = frame_type
if end:
self._fragmented = False
fin = 1
else:
self._fragmented = True
fin = 0
if self._handshake._options.use_deflate_frame:
rsv1 = 1
if mask:
mask_bit = 1 << 7
else:
mask_bit = 0
header = chr(fin << 7 | rsv1 << 6 | rsv2 << 5 | rsv3 << 4 | opcode)
payload_length = len(payload)
if payload_length <= 125:
header += chr(mask_bit | payload_length)
elif payload_length < 1 << 16:
header += chr(mask_bit | 126) + struct.pack('!H', payload_length)
elif payload_length < 1 << 63:
header += chr(mask_bit | 127) + struct.pack('!Q', payload_length)
else:
raise Exception('Too long payload (%d byte)' % payload_length)
if mask:
payload = self._mask_hybi(payload)
self._socket.sendall(header + payload)
def send_binary(self, payload, end=True, mask=True):
self.send_data(payload, OPCODE_BINARY, end, mask)
def send_text(self, payload, end=True, mask=True):
self.send_data(payload.encode('utf-8'), OPCODE_TEXT, end, mask)
def _assert_receive_data(self, payload, opcode, fin, rsv1, rsv2, rsv3):
(actual_fin, actual_rsv1, actual_rsv2, actual_rsv3, actual_opcode,
payload_length) = read_frame_header(self._socket)
if actual_opcode != opcode:
raise Exception(
'Unexpected opcode: %d (expected) vs %d (actual)' %
(opcode, actual_opcode))
if actual_fin != fin:
raise Exception(
'Unexpected fin: %d (expected) vs %d (actual)' %
(fin, actual_fin))
if rsv1 is None:
rsv1 = 0
if self._handshake._options.use_deflate_frame:
rsv1 = 1
if rsv2 is None:
rsv2 = 0
if rsv3 is None:
rsv3 = 0
if actual_rsv1 != rsv1:
raise Exception(
'Unexpected rsv1: %r (expected) vs %r (actual)' %
(rsv1, actual_rsv1))
if actual_rsv2 != rsv2:
raise Exception(
'Unexpected rsv2: %r (expected) vs %r (actual)' %
(rsv2, actual_rsv2))
if actual_rsv3 != rsv3:
raise Exception(
'Unexpected rsv3: %r (expected) vs %r (actual)' %
(rsv3, actual_rsv3))
received = receive_bytes(self._socket, payload_length)
if self._incoming_frame_filter is not None:
received = self._incoming_frame_filter.filter(received)
if len(received) != len(payload):
raise Exception(
'Unexpected payload length: %d (expected) vs %d (actual)' %
(len(payload), len(received)))
if payload != received:
raise Exception(
'Unexpected payload: %r (expected) vs %r (actual)' %
(payload, received))
def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
rsv1=None, rsv2=None, rsv3=None):
self._assert_receive_data(payload, opcode, fin, rsv1, rsv2, rsv3)
def assert_receive_text(self, payload, opcode=OPCODE_TEXT, fin=1,
rsv1=None, rsv2=None, rsv3=None):
self._assert_receive_data(payload.encode('utf-8'), opcode, fin, rsv1,
rsv2, rsv3)
def _build_close_frame(self, code, reason, mask):
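        # Per RFC 6455, a close frame body is an optional 2-byte status code
        # in network byte order followed by a UTF-8 encoded reason string.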
frame = chr(1 << 7 | OPCODE_CLOSE)
if code is not None:
body = struct.pack('!H', code) + reason.encode('utf-8')
else:
body = ''
if mask:
frame += chr(1 << 7 | len(body)) + self._mask_hybi(body)
else:
frame += chr(len(body)) + body
return frame
def send_close(self, code, reason):
self._socket.sendall(
self._build_close_frame(code, reason, True))
def assert_receive_close(self, code, reason):
expected_frame = self._build_close_frame(code, reason, False)
actual_frame = receive_bytes(self._socket, len(expected_frame))
if actual_frame != expected_frame:
raise Exception(
'Unexpected close frame: %r (expected) vs %r (actual)' %
(expected_frame, actual_frame))
class WebSocketStreamHixie75(object):
"""Frame processor for the WebSocket protocol version Hixie 75 and HyBi 00.
"""
_CLOSE_FRAME = '\xff\x00'
def __init__(self, socket, unused_handshake):
self._socket = socket
def send_frame_of_arbitrary_bytes(self, header, body):
self._socket.sendall(header + body)
    def send_data(self, payload, unused_frame_type, unused_end, unused_mask):
frame = ''.join(['\x00', payload, '\xff'])
self._socket.sendall(frame)
def send_binary(self, unused_payload, unused_end, unused_mask):
pass
def send_text(self, payload, unused_end, unused_mask):
encoded_payload = payload.encode('utf-8')
frame = ''.join(['\x00', encoded_payload, '\xff'])
self._socket.sendall(frame)
def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
rsv1=0, rsv2=0, rsv3=0):
raise Exception('Binary frame is not supported in hixie75')
def assert_receive_text(self, payload):
received = receive_bytes(self._socket, 1)
if received != '\x00':
raise Exception(
'Unexpected frame type: %d (expected) vs %d (actual)' %
(0, ord(received)))
received = receive_bytes(self._socket, len(payload) + 1)
if received[-1] != '\xff':
raise Exception(
'Termination expected: 0xff (expected) vs %r (actual)' %
received)
if received[0:-1] != payload:
raise Exception(
'Unexpected payload: %r (expected) vs %r (actual)' %
(payload, received[0:-1]))
def send_close(self, code, reason):
self._socket.sendall(self._CLOSE_FRAME)
def assert_receive_close(self, unused_code, unused_reason):
closing = receive_bytes(self._socket, len(self._CLOSE_FRAME))
if closing != self._CLOSE_FRAME:
raise Exception('Didn\'t receive closing handshake')
class ClientOptions(object):
"""Holds option values to configure the Client object."""
def __init__(self):
self.version = 13
self.server_host = ''
self.origin = ''
self.resource = ''
self.server_port = -1
self.socket_timeout = 1000
self.use_tls = False
self.extensions = []
# Enable deflate-application-data.
self.use_deflate_frame = False
# Enable mux
self.use_mux = False
def enable_deflate_frame(self):
self.use_deflate_frame = True
self.extensions.append(_DEFLATE_FRAME_EXTENSION)
def enable_mux(self):
self.use_mux = True
self.extensions.append(_MUX_EXTENSION)
def connect_socket_with_retry(host, port, timeout, use_tls,
retry=10, sleep_sec=0.1):
retry_count = 0
while retry_count < retry:
try:
s = socket.socket()
s.settimeout(timeout)
s.connect((host, port))
if use_tls:
return _TLSSocket(s)
return s
except socket.error, e:
if e.errno != errno.ECONNREFUSED:
raise
else:
retry_count = retry_count + 1
time.sleep(sleep_sec)
return None
class Client(object):
"""WebSocket client."""
def __init__(self, options, handshake, stream_class):
self._logger = util.get_class_logger(self)
self._options = options
self._socket = None
self._handshake = handshake
self._stream_class = stream_class
def connect(self):
self._socket = connect_socket_with_retry(
self._options.server_host,
self._options.server_port,
self._options.socket_timeout,
self._options.use_tls)
self._handshake.handshake(self._socket)
self._stream = self._stream_class(self._socket, self._handshake)
self._logger.info('Connection established')
def send_frame_of_arbitrary_bytes(self, header, body):
self._stream.send_frame_of_arbitrary_bytes(header, body)
def send_message(self, message, end=True, binary=False, raw=False,
mask=True):
if binary:
self._stream.send_binary(message, end, mask)
elif raw:
self._stream.send_data(message, OPCODE_TEXT, end, mask)
else:
self._stream.send_text(message, end, mask)
def assert_receive(self, payload, binary=False):
if binary:
self._stream.assert_receive_binary(payload)
else:
self._stream.assert_receive_text(payload)
def send_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
self._stream.send_close(code, reason)
def assert_receive_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
self._stream.assert_receive_close(code, reason)
def close_socket(self):
self._socket.close()
def assert_connection_closed(self):
try:
read_data = receive_bytes(self._socket, 1)
except Exception, e:
if str(e).find(
'Connection closed before receiving requested length ') == 0:
return
try:
error_number, message = e
for error_name in ['ECONNRESET', 'WSAECONNRESET']:
if (error_name in dir(errno) and
error_number == getattr(errno, error_name)):
return
except:
raise e
raise e
raise Exception('Connection is not closed (Read: %r)' % read_data)
def create_client(options):
return Client(
options, WebSocketHandshake(options), WebSocketStream)
def create_client_hybi00(options):
return Client(
options,
WebSocketHybi00Handshake(options, '0'),
WebSocketStreamHixie75)
def create_client_hixie75(options):
return Client(
options, WebSocketHixie75Handshake(options), WebSocketStreamHixie75)
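# A minimal usage sketch (illustrative only; the host, port and resource
# below are assumptions, not values defined in this module):
#
#   options = ClientOptions()
#   options.server_host = 'localhost'
#   options.server_port = 8880
#   options.resource = '/echo'
#   options.origin = 'http://localhost/'
#   client = create_client(options)
#   client.connect()
#   client.send_message(u'Hello')
#   client.assert_receive(u'Hello')
#   client.send_close()
#   client.assert_receive_close()
#   client.close_socket()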
# vi:sts=4 sw=4 et
| mpl-2.0 | -241,253,350,295,308,800 | 35.096364 | 79 | 0.587795 | false |
jolevq/odoopub | addons/account/wizard/account_period_close.py | 341 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_period_close(osv.osv_memory):
"""
close period
"""
_name = "account.period.close"
_description = "period close"
_columns = {
'sure': fields.boolean('Check this box'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close period
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: account period close’s ID or list of IDs
"""
journal_period_pool = self.pool.get('account.journal.period')
period_pool = self.pool.get('account.period')
account_move_obj = self.pool.get('account.move')
mode = 'done'
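        # Closing a period flips its journal periods and the period itself to
        # the 'done' state; any draft journal entry in the period blocks the
        # close.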
for form in self.read(cr, uid, ids, context=context):
if form['sure']:
for id in context['active_ids']:
account_move_ids = account_move_obj.search(cr, uid, [('period_id', '=', id), ('state', '=', "draft")], context=context)
if account_move_ids:
raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.'))
cr.execute('update account_journal_period set state=%s where period_id=%s', (mode, id))
cr.execute('update account_period set state=%s where id=%s', (mode, id))
self.invalidate_cache(cr, uid, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,380,996,891,953,524,000 | 42.311475 | 145 | 0.592733 | false |
Jackysonglanlan/devops | IDEs/sublime/shared-pkgs/Packages/pygments/all/pygments/lexers/ml.py | 47 | 27891 | # -*- coding: utf-8 -*-
"""
pygments.lexers.ml
~~~~~~~~~~~~~~~~~~
Lexers for ML family languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer']
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
.. versionadded:: 1.5
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun']
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = set((
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
))
symbolicid_reserved = set((
# Core
':', '\|', '=', '=>', '->', '#',
# Modules
':>',
))
nonid_reserved = set(('(', ')', '[', ']', '{', '}', ',', ';', '...', '_'))
alphanumid_re = r"[a-zA-Z][\w']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erroneous
# multiple-character string.
def stringy(whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
else:
token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
elif match.group(1) in self.symbolicid_reserved:
token = Error
else:
token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved:
token = Keyword.Reserved
elif str in self.symbolicid_reserved:
token = Punctuation
else:
token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|\{', Punctuation, 'main'),
(r'\)|\]|\}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
(r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[\w\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [
default('main')
],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
default('#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
default('#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
default('#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
include('breakout'),
include('core'),
(r'\S+', Error),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[\w\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
.. versionadded:: 0.7
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = (
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
)
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
.. versionadded:: 1.5
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = (
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
)
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
# number literals
(r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
(r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'\{', Operator, '#push'),
(r'\}', Operator, '#pop'),
# html literals
# this is a much more strict that the actual parser,
# since a<b would not be parsed as html
# but then again, the parser is way too lax, and we can't hope
# to have something as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
# unfortunately, i don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
# although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that only
            # module names are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
# = has a special role because this is the only
# way to syntactic distinguish binding constructions
# unfortunately, this colors the equal in {x=2} too
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
            # we need this rule because we don't parse type definitions
            # specially, so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?=\{)', String.Single),
# identifiers
# this avoids to color '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
# the code is pretty funky in here, but this code would roughly
# translate in caml to:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
default(('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?\{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
default('#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
default(('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
default('#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
default('#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
            # the look-ahead here allows us to parse f(x : int, y : float -> truc)
# correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
default('#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
        # note that this function would not work if the source
# contained identifiers like `{)` (although it could be patched
# to support it)
'type-tuple': [
include('comments-and-spaces'),
(r'[^()/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\{', Keyword.Type, '#push'),
(r'\}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# default(('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'\}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
# the copy pasting between string and single-string
# is kinda sad. Is there a way to avoid that??
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
# started the end of the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?=\{)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'\{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'\{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
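# A minimal usage sketch (illustrative only):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight(u'let x = 1 in x', OcamlLexer(), TerminalFormatter()))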
| mit | 5,803,990,518,631,832,000 | 35.269181 | 87 | 0.456957 | false |
hanlind/nova | nova/policies/evacuate.py | 5 | 1069 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-evacuate'
POLICY_ROOT = 'os_compute_api:os-evacuate:%s'
evacuate_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_API),
]
def list_rules():
return evacuate_policies
| apache-2.0 | -887,845,066,219,517,600 | 28.694444 | 78 | 0.710009 | false |
GirlsCodePy/girlscode-coursebuilder | modules/dashboard/dashboard_tests.py | 3 | 34294 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/dashboard/."""
__author__ = 'Glenn De Jonghe ([email protected])'
import cgi
import time
import json
from common import crypto
from common import menus
from common import utils
from models import courses
from models import custom_modules
from models import models
from models import resources_display
from models import roles
from models import transforms
from modules.dashboard import dashboard
from modules.dashboard import question_group_editor
from modules.dashboard import role_editor
from tests.functional import actions
from google.appengine.api import namespace_manager
class QuestionTablesTests(actions.TestBase):
def _soup_table(self):
asset_tables = self.parse_html_string_to_soup(
self.get(self.URL).body).select('.assets-table')
self.assertEquals(len(asset_tables), 1)
return asset_tables[0]
class QuestionDashboardTestCase(QuestionTablesTests):
"""Tests Assets > Questions."""
COURSE_NAME = 'question_dashboard'
ADMIN_EMAIL = '[email protected]'
URL = 'dashboard?action=edit_questions'
def setUp(self):
super(QuestionDashboardTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Questions Dashboard')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
def tearDown(self):
namespace_manager.set_namespace(self.old_namespace)
super(QuestionDashboardTestCase, self).tearDown()
def test_unused_question(self):
# Create an unused question
unused_question_dto = models.QuestionDTO(None, {
'description': 'unused',
'type': 0
})
unused_question_id = models.QuestionDAO.save(unused_question_dto)
self.course.save()
dom = self.parse_html_string(self.get(self.URL).body)
question_row = dom.find('.//tr[@data-quid=\'{}\']'.format(
unused_question_id))
filter_data = json.loads(question_row.get('data-filter'))
self.assertEqual(filter_data['unused'], 1)
def test_table_entries(self):
# Create a question
mc_question_description = 'Test MC Question'
mc_question_dto = models.QuestionDTO(None, {
'description': mc_question_description,
'type': 0 # MC
})
mc_question_id = models.QuestionDAO.save(mc_question_dto)
# Create an assessment and add the question to the content.
# Also include a broken question ref to the assessment (and expect this
# doesn't break anything).
assessment_one = self.course.add_assessment()
assessment_one.title = 'Test Assessment One'
assessment_one.html_content = """
<question quid="%s" weight="1" instanceid="1"></question>
<question quid="broken" weight="1" instanceid="broken"></question>
""" % mc_question_id
# Create a second question
sa_question_description = 'Test SA Question'
sa_question_dto = models.QuestionDTO(None, {
'description': sa_question_description,
'type': 1 # SA
})
sa_question_id = models.QuestionDAO.save(sa_question_dto)
# Create a question group and add the second question
qg_description = 'Question Group'
qg_dto = models.QuestionGroupDTO(None, {
'description': qg_description,
'items': [{'question': str(sa_question_id)}]
})
qg_id = models.QuestionGroupDAO.save(qg_dto)
# Create a second assessment and add the question group to the content
assessment_two = self.course.add_assessment()
assessment_two.title = 'Test Assessment'
assessment_two.html_content = """
<question-group qgid="%s" instanceid="QG"></question-group>
""" % qg_id
self.course.save()
# First check Question Bank table
questions_table = self._soup_table()
question_rows = questions_table.select('tr[data-filter]')
self.assertEquals(len(question_rows), 2)
# Check edit link and description of the first question
first_row = question_rows[0]
description_link = first_row.select('.description-cell a')[0]
self.assertEquals(
description_link.text.strip(), mc_question_description)
self.assertEquals(description_link.get('href'), (
'dashboard?action=edit_question&key={}'.format(mc_question_id)))
# Check if the assessment is listed
location_link = first_row.select('.locations-cell a')[0]
self.assertEquals(location_link.get('href'),
'assessment?name={}'.format(assessment_one.unit_id))
self.assertEquals(assessment_one.title, location_link.text.strip())
# Check second question (=row)
second_row = question_rows[1]
self.assertEquals(
second_row.select('.description-cell a')[0].text.strip(),
sa_question_description)
# Check whether the containing Question Group is listed
self.assertEquals(second_row.select('.groups-cell li')[0].text.strip(),
qg_description)
def test_no_questions(self):
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
self.assertEquals(
1, len(soup.select('.gcb-list__empty-message')))
def test_no_question_groups(self):
description = 'Question description'
models.QuestionDAO.save(models.QuestionDTO(None, {
'description': description
}))
table = self._soup_table()
self.assertEquals(
description, table.select('.description-cell a')[0].text.strip())
def test_if_buttons_are_present(self):
"""Tests if all buttons are present.
In the past it wasn't allowed to add a question group when there
were no questions yet.
"""
body = self.get(self.URL).body
self.assertIn('Add Short Answer', body)
self.assertIn('Add Multiple Choice', body)
def test_last_modified_timestamp(self):
begin_time = time.time()
question_dto = models.QuestionDTO(None, {})
models.QuestionDAO.save(question_dto)
self.assertTrue((begin_time <= question_dto.last_modified) and (
question_dto.last_modified <= time.time()))
questions_table = self._soup_table()
self.assertEquals(
questions_table.select('[data-timestamp]')[0].get(
'data-timestamp', ''),
str(question_dto.last_modified)
)
def test_question_clone(self):
# Add a question by just nailing it in to the datastore.
mc_question_description = 'Test MC Question'
mc_question_dto = models.QuestionDTO(None, {
'description': mc_question_description,
'type': 0 # MC
})
models.QuestionDAO.save(mc_question_dto)
# On the assets -> questions page, clone the question.
response = self.get(self.URL)
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
clone_link = soup.select('.clone-question')[0]
question_key = clone_link.get('data-key')
xsrf_token = soup.select(
'#question-table')[0].get('data-clone-question-token')
self.post(
'dashboard?action=clone_question',
{
'key': question_key,
'xsrf_token': xsrf_token
})
response = self.get(self.URL)
self.assertIn(mc_question_description + ' (clone)', response.body)
def _call_add_to_question_group(self, qu_id, qg_id, weight, xsrf_token):
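        # Posts the form fields expected by the dashboard's
        # 'add_to_question_group' action and returns the raw response, so
        # callers can assert on either the HTTP status or the JSON body.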
return self.post('dashboard', {
'action': 'add_to_question_group',
'question_id': qu_id,
'group_id': qg_id,
'weight': weight,
'xsrf_token': xsrf_token,
}, True)
def test_add_to_question_group(self):
# Create a question
question_description = 'Question'
question_dto = models.QuestionDTO(None, {
'description': question_description,
'type': 0 # MC
})
question_id = models.QuestionDAO.save(question_dto)
add_to_group_selector = '.add-question-to-group'
# No groups are present so no add_to_group icon should be present
self.assertEqual([], self._soup_table().select(add_to_group_selector))
# Create a group
qg_description = 'Question Group'
qg_dto = models.QuestionGroupDTO(None, {
'description': qg_description,
'items': []
})
qg_id = models.QuestionGroupDAO.save(qg_dto)
# Since we now have a group, the add_to_group icon should be visible
self.assertIsNotNone(
self._soup_table().select(add_to_group_selector))
# Add Question to Question Group via post_add_to_question_group
questions_table = self._soup_table()
xsrf_token = questions_table.get('data-qg-xsrf-token', '')
response = self._call_add_to_question_group(
question_id, qg_id, 1, xsrf_token)
# Check if operation was successful
self.assertEquals(response.status_int, 200)
questions_table = self._soup_table()
self.assertEquals(
questions_table.select('.groups-cell li')[0].text.strip(),
qg_description
)
# Check a bunch of calls that should fail
response = self._call_add_to_question_group(question_id, qg_id, 1, 'a')
self.assertEquals(response.status_int, 403)
response = transforms.loads(self._call_add_to_question_group(
-1, qg_id, 1, xsrf_token).body)
self.assertEquals(response['status'], 500)
response = transforms.loads(self._call_add_to_question_group(
question_id, -1, 1, xsrf_token).body)
self.assertEquals(response['status'], 500)
response = transforms.loads(self._call_add_to_question_group(
'a', qg_id, 1, xsrf_token).body)
self.assertEquals(response['status'], 500)
response = transforms.loads(self._call_add_to_question_group(
question_id, qg_id, 'a', xsrf_token).body)
self.assertEquals(response['status'], 500)
class QuestionGroupDashboardTestCase(QuestionTablesTests):
"""Tests Assets > Question Groups."""
COURSE_NAME = 'question_group_dashboard'
ADMIN_EMAIL = '[email protected]'
URL = 'dashboard?action=edit_question_groups'
def setUp(self):
super(QuestionGroupDashboardTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Question Groups Dashboard')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
def tearDown(self):
namespace_manager.set_namespace(self.old_namespace)
super(QuestionGroupDashboardTestCase, self).tearDown()
def test_table_entries(self):
# Create a question
mc_question_description = 'Test MC Question'
mc_question_dto = models.QuestionDTO(None, {
'description': mc_question_description,
'type': 0 # MC
})
mc_question_id = models.QuestionDAO.save(mc_question_dto)
# Create a question group and add the question
qg_description = 'Question Group'
qg_dto = models.QuestionGroupDTO(None, {
'description': qg_description,
'items': [{'question': str(mc_question_id)}]
})
qg_id = models.QuestionGroupDAO.save(qg_dto)
# Create a second assessment and add the question group to the content
assessment_two = self.course.add_assessment()
assessment_two.title = 'Test Assessment'
assessment_two.html_content = """
<question-group qgid="%s" instanceid="QG"></question-group>
""" % qg_id
self.course.save()
# Check Question Group table
question_groups_table = self._soup_table()
row = question_groups_table.select('tbody tr')[0]
# Check edit link and description
edit_link = row.select('.description-cell a')[0]
self.assertEquals(edit_link.text.strip(), qg_description)
self.assertEquals(
edit_link.get('href'),
'dashboard?action=edit_question_group&key={}'.format(qg_id))
# The question that is part of this group, should be listed
self.assertEquals(
row.select('.questions-cell li')[0].text.strip(),
mc_question_description)
# Assessment where this Question Group is located, should be linked
location_link = row.select('.locations-cell a')[0]
self.assertEquals(
location_link.get('href'),
'assessment?name={}'.format(assessment_two.unit_id))
self.assertEquals(assessment_two.title, location_link.text.strip())
def test_no_question_groups(self):
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
self.assertEquals(
1, len(soup.select('.gcb-list__empty-message')))
def test_no_questions(self):
description = 'Group description'
models.QuestionGroupDAO.save(models.QuestionGroupDTO(None, {
'description': description
}))
question_groups_table = self._soup_table()
self.assertEquals(
question_groups_table.select('.description-cell a')[0].text.strip(),
description)
def test_if_buttons_are_present(self):
"""Tests if all buttons are present.
In the past it wasn't allowed to add a question group when there
were no questions yet.
"""
body = self.get(self.URL).body
self.assertIn('Add Question Group', body)
def test_adding_empty_question_group(self):
QG_URL = '/%s%s' % (
self.COURSE_NAME,
question_group_editor.QuestionGroupRESTHandler.URI)
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
question_group_editor.QuestionGroupRESTHandler.XSRF_TOKEN)
description = 'Question Group'
version = (
question_group_editor.QuestionGroupRESTHandler.SCHEMA_VERSIONS[0])
payload = {
'description': description,
'version': version,
'introduction': '',
'items': []
}
response = self.put(QG_URL, {'request': transforms.dumps({
'xsrf_token': cgi.escape(xsrf_token),
'payload': transforms.dumps(payload)})})
self.assertEquals(response.status_int, 200)
payload = transforms.loads(response.body)
self.assertEquals(payload['status'], 200)
self.assertEquals(payload['message'], 'Saved.')
question_groups_table = self._soup_table()
self.assertEquals(
question_groups_table.select('.description-cell a')[0].text.strip(),
description)
def test_last_modified_timestamp(self):
begin_time = time.time()
qg_dto = models.QuestionGroupDTO(None, {})
models.QuestionGroupDAO.save(qg_dto)
self.assertTrue((begin_time <= qg_dto.last_modified) and (
qg_dto.last_modified <= time.time()))
question_groups_table = self._soup_table()
self.assertEquals(
question_groups_table.select('[data-timestamp]')[0].get(
'data-timestamp', ''),
str(qg_dto.last_modified)
)
def _call_add_to_question_group(self, qu_id, qg_id, weight, xsrf_token):
return self.post('dashboard', {
'action': 'add_to_question_group',
'question_id': qu_id,
'group_id': qg_id,
'weight': weight,
'xsrf_token': xsrf_token,
}, True)
# TODO(tlarsen): add Question Group tests; tracked here: http://b/24373601
class CourseOutlineTestCase(actions.CourseOutlineTest):
"""Tests the Course Outline."""
COURSE_NAME = 'outline'
ADMIN_EMAIL = '[email protected]'
STUDENT_EMAIL = '[email protected]'
URL = 'dashboard'
def setUp(self):
super(CourseOutlineTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Outline Testing')
self.course = courses.Course(None, context)
self.assessment = self.course.add_assessment()
self.assessment.title = 'Test Assessment'
self.link = self.course.add_link()
self.link.title = 'Test Link'
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'Test Lesson'
self.course.save()
def _get_item_for(self, component_type, item_id):
return self._get_outline().select('[data-{}-id={}]'.format(
component_type, item_id))[0]
def _get_outline(self):
response = self.get(self.URL)
return self.parse_html_string_to_soup(response.body).select(
'.course-outline')[0]
def _check_item_label(self, li, student_url, title):
a = li.select('.name a')[0]
self.assertEquals(a.text, title)
self.assertEquals(
li.select('.view-icon')[0].get('href', ''), student_url)
def test_title(self):
self._check_item_label(
self._get_item_for('unit', self.link.unit_id), '',
self.link.title)
self._check_item_label(
self._get_item_for('unit', self.assessment.unit_id),
'assessment?name={}'.format(self.assessment.unit_id),
self.assessment.title)
self._check_item_label(
self._get_item_for('unit', self.unit.unit_id),
'unit?unit={}'.format(self.unit.unit_id), self.unit.title)
self._check_item_label(
self._get_item_for('lesson', self.lesson.lesson_id),
'unit?unit={}&lesson={}'.format(
self.unit.unit_id, self.lesson.lesson_id),
self.lesson.title)
class RoleEditorTestCase(actions.TestBase):
"""Tests the Roles tab and Role Editor."""
COURSE_NAME = 'role_editor'
ADMIN_EMAIL = '[email protected]'
URL = 'dashboard?action=edit_roles'
def setUp(self):
super(RoleEditorTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Roles Testing')
self.course = courses.Course(None, context)
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
# Treat as module-protected. pylint: disable=protected-access
self.old_registered_permission = roles.Roles._REGISTERED_PERMISSIONS
roles.Roles._REGISTERED_PERMISSIONS = {}
def tearDown(self):
# Treat as module-protected. pylint: disable=protected-access
roles.Roles._REGISTERED_PERMISSIONS = self.old_registered_permission
namespace_manager.set_namespace(self.old_namespace)
super(RoleEditorTestCase, self).tearDown()
def _create_role(self, role):
role_dto = models.RoleDTO(None, {
'name': role,
})
return models.RoleDAO.save(role_dto)
def test_roles_tab(self):
role_name = 'Test Role'
role_id = self._create_role(role_name)
li = self.parse_html_string(self.get(self.URL).body).find(
'.//tbody/tr/td/a')
self.assertEquals(li.text.strip(), role_name)
self.assertEquals(li.get('href'), (
'dashboard?action=edit_role&key=%s' % role_id))
def test_editor_hooks(self):
module1 = custom_modules.Module('module1', '', [], [])
module2 = custom_modules.Module('module2', '', [], [])
module3 = custom_modules.Module('module3', '', [], [])
module4 = custom_modules.Module('module4', '', [], [])
roles.Roles.register_permissions(module1, lambda unused: [
roles.Permission('permissiona', 'a'),
roles.Permission('permissionb', 'b')])
roles.Roles.register_permissions(module2, lambda unused: [
roles.Permission('permissionc', 'c'),
roles.Permission('permissiond', 'd')])
roles.Roles.register_permissions(module4, lambda unused: [
roles.Permission('permissiong', 'g'),
roles.Permission('permissiond', 'h')])
handler = role_editor.RoleRESTHandler()
handler.course = self.course
datastore_permissions = {
module1.name: ['permission', 'permissiona', 'permissionb'],
module2.name: ['permissionc', 'permissiond'],
module3.name: ['permissione', 'permissionf']
}
datastore_dict = {
'name': 'Role Name',
'users': ['[email protected]', '[email protected]'],
'permissions': datastore_permissions
}
editor_dict = handler.transform_for_editor_hook(datastore_dict)
self.assertEquals(editor_dict['name'], 'Role Name')
self.assertEquals(editor_dict['users'], '[email protected], [email protected]')
modules = editor_dict['modules']
# Test registered assigned permission
permissionc = modules[module2.name][0]
self.assertEquals(permissionc['assigned'], True)
self.assertEquals(permissionc['name'], 'permissionc')
self.assertEquals(permissionc['description'], 'c')
# Test unregistered module with assigned permission
permissionsf = modules[role_editor.RoleRESTHandler.INACTIVE_MODULES][1]
self.assertEquals(permissionsf['assigned'], True)
self.assertEquals(permissionsf['name'], 'permissionf')
self.assertEquals(
permissionsf['description'],
'This permission was set by the module "module3" which is '
'currently not registered.'
)
# Test registered module with assigned unregistered permission
permission = modules[module1.name][2]
self.assertEquals(permission['assigned'], True)
self.assertEquals(permission['name'], 'permission')
self.assertEquals(
permission['description'],
'This permission is currently not registered.'
)
# Test registered unassigned permissions
permissiong = editor_dict['modules'][module4.name][0]
self.assertEquals(permissiong['assigned'], False)
self.assertEquals(permissiong['name'], 'permissiong')
self.assertEquals(permissiong['description'], 'g')
# Call the hook which gets called when saving
new_datastore_dict = handler.transform_after_editor_hook(datastore_dict)
# If original dict matches new dict then both hooks work correctly
self.assertEquals(datastore_dict, new_datastore_dict)
def test_not_unique_role_name(self):
role_name = 'Test Role'
role_id = self._create_role(role_name)
handler = role_editor.RoleRESTHandler()
handler.course = self.course
editor_dict = {
'name': role_name
}
errors = []
handler.validate(editor_dict, role_id + 1, None, errors)
self.assertEquals(
errors[0], 'The role must have a unique non-empty name.')
class DashboardAccessTestCase(actions.TestBase):
ACCESS_COURSE_NAME = 'dashboard_access_yes'
NO_ACCESS_COURSE_NAME = 'dashboard_access_no'
ADMIN_EMAIL = '[email protected]'
USER_EMAIL = '[email protected]'
ROLE = 'test_role'
ACTION = 'test_action'
PERMISSION = 'can_access_dashboard'
PERMISSION_DESCRIPTION = 'Can Access Dashboard.'
def setUp(self):
super(DashboardAccessTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
context = actions.simple_add_course(
self.ACCESS_COURSE_NAME, self.ADMIN_EMAIL, 'Course with access')
self.course_with_access = courses.Course(None, context)
with utils.Namespace(self.course_with_access.app_context.namespace):
role_dto = models.RoleDTO(None, {
'name': self.ROLE,
'users': [self.USER_EMAIL],
'permissions': {dashboard.custom_module.name: [self.PERMISSION]}
})
models.RoleDAO.save(role_dto)
context = actions.simple_add_course(
self.NO_ACCESS_COURSE_NAME, self.ADMIN_EMAIL,
'Course with no access'
)
self.course_without_access = courses.Course(None, context)
def test_content(self):
return self.render_page(
{'main_content': 'test', 'page_title': 'test'})
# save properties
self.old_menu_group = dashboard.DashboardHandler.root_menu_group
# pylint: disable=W0212
        self.old_get_actions = dashboard.DashboardHandler._custom_get_actions
# pylint: enable=W0212
# put a dummy method in
menu_group = menus.MenuGroup('test', 'Test Dashboard')
dashboard.DashboardHandler.root_menu_group = menu_group
dashboard.DashboardHandler.default_action = self.ACTION
dashboard.DashboardHandler.add_nav_mapping(self.ACTION, self.ACTION)
dashboard.DashboardHandler.add_sub_nav_mapping(self.ACTION, self.ACTION,
self.ACTION, action=self.ACTION, contents=test_content)
dashboard.DashboardHandler.map_get_action_to_permission(
self.ACTION, dashboard.custom_module, self.PERMISSION)
actions.logout()
def tearDown(self):
# restore properties
# pylint: disable=W0212
dashboard.DashboardHandler.root_menu_group = self.old_menu_group
        dashboard.DashboardHandler._custom_get_actions = self.old_get_actions
# pylint: enable=W0212
super(DashboardAccessTestCase, self).tearDown()
def test_dashboard_access_method(self):
with utils.Namespace(self.course_with_access.app_context.namespace):
self.assertFalse(dashboard.DashboardHandler.current_user_has_access(
self.course_with_access.app_context))
with utils.Namespace(self.course_without_access.app_context.namespace):
self.assertFalse(dashboard.DashboardHandler.current_user_has_access(
self.course_without_access.app_context))
actions.login(self.USER_EMAIL, is_admin=False)
with utils.Namespace(self.course_with_access.app_context.namespace):
self.assertTrue(dashboard.DashboardHandler.current_user_has_access(
self.course_with_access.app_context))
with utils.Namespace(self.course_without_access.app_context.namespace):
self.assertFalse(dashboard.DashboardHandler.current_user_has_access(
self.course_without_access.app_context))
actions.logout()
def _get_all_picker_options(self):
return self.parse_html_string(
self.get('/%s/dashboard' % self.ACCESS_COURSE_NAME).body
).findall('.//*[@id="gcb-course-picker-menu"]//a')
def test_course_picker(self):
actions.login(self.USER_EMAIL, is_admin=False)
picker_options = self._get_all_picker_options()
self.assertEquals(len(list(picker_options)), 1)
actions.logout()
actions.login(self.ADMIN_EMAIL, is_admin=True)
picker_options = self._get_all_picker_options()
# Expect 3 courses, as the default one is also considered for the picker
self.assertEquals(len(picker_options), 3)
actions.logout()
def _get_auth_nav_links(self):
return self.parse_html_string_to_soup(
self.get('/%s/' % self.ACCESS_COURSE_NAME).body
).select('.gcb-login-header a')
def test_dashboard_link(self):
# Not signed in => no dashboard or admin link visible
self.assertEquals(len(self._get_auth_nav_links()), 1)
# Sign in user with dashboard permissions => dashboard link visible
actions.login(self.USER_EMAIL, is_admin=False)
links = self._get_auth_nav_links()
self.assertEquals(len(links), 2)
self.assertEquals(links[0].get('href'), 'dashboard')
self.assertEquals(links[0].text, 'Dashboard')
# Sign in course admin => dashboard link visible
actions.login(self.ADMIN_EMAIL, is_admin=False)
links = self._get_auth_nav_links()
self.assertEquals(len(links), 2)
self.assertEquals(links[0].get('href'), 'dashboard')
self.assertEquals(links[0].text, 'Dashboard')
# Arbitrary users don't see the link
actions.login('[email protected]', is_admin=False)
self.assertEquals(len(self._get_auth_nav_links()), 1)
class DashboardCustomNavTestCase(actions.TestBase):
"""Tests Assets > Questions."""
COURSE_NAME = 'custom_dashboard'
ADMIN_EMAIL = '[email protected]'
URL = 'dashboard?action=custom_mod'
ACTION = 'custom_mod'
CONTENT_PATH = './/div[@id="gcb-main-area"]/div[@id="gcb-main-content"]'
def setUp(self):
super(DashboardCustomNavTestCase, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Custom Dashboard')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
def tearDown(self):
namespace_manager.set_namespace(self.old_namespace)
super(DashboardCustomNavTestCase, self).tearDown()
def test_custom_top_nav(self):
# Add a new top level navigation action
class CustomNavHandler(object):
@classmethod
def show_page(cls, dashboard_handler):
dashboard_handler.render_page({
'page_title': dashboard_handler.format_title('CustomNav'),
'main_content': 'TheMainContent'})
dashboard.DashboardHandler.add_nav_mapping('custom_menu', 'CUSTOM_MOD')
dashboard.DashboardHandler.add_sub_nav_mapping(
'custom_menu', self.ACTION, 'Custom Action', self.ACTION)
dashboard.DashboardHandler.add_custom_get_action(
self.ACTION, CustomNavHandler.show_page)
response = self.get('dashboard')
soup = self.parse_html_string_to_soup(response.body)
actual = [
a.text.strip()
for a in soup.select('a.mdl-navigation__link.gcb-active')]
self.assertEquals(['Custom Dashboard', 'Outline'], actual)
response = self.get(self.URL)
soup = self.parse_html_string_to_soup(response.body)
actual = [
a.text.strip()
for a in soup.select('a.mdl-navigation__link.gcb-active')]
self.assertEquals(['Custom Dashboard', 'Custom Action'], actual)
self.assertEquals(
'TheMainContent',
soup.select('div#gcb-main-content')[0].text.strip())
def test_first_tab(self):
url = 'dashboard?action=analytics_students'
response = self.get(url)
soup = self.parse_html_string_to_soup(response.body)
actual = [
a.text.strip()
for a in soup.select('a.mdl-navigation__link.gcb-active')]
self.assertEquals(['Custom Dashboard', 'Students'], actual)
class TestLessonSchema(actions.TestBase):
COURSE_NAME = 'lesson_dashboard'
ADMIN_EMAIL = '[email protected]'
def setUp(self):
super(TestLessonSchema, self).setUp()
actions.login(self.ADMIN_EMAIL, is_admin=True)
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Lesson Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.unit = self.course.add_unit()
self.course.save()
def tearDown(self):
namespace_manager.set_namespace(self.old_namespace)
super(TestLessonSchema, self).tearDown()
def test_video_field_hidden_in_new_lessons(self):
lesson = self.course.add_lesson(self.unit)
self.course.save()
schema = get_lesson_schema(self.course, lesson)
video_options = find_schema_field(schema, ['properties', 'video',
'_inputex'])
self.assertEqual(video_options['_type'], 'hidden')
def test_video_field_not_hidden_in_lessons_with_field_set(self):
lesson = self.course.add_lesson(self.unit)
lesson.video = 'oHg5SJYRHA0'
self.course.save()
schema = get_lesson_schema(self.course, lesson)
video_options = find_schema_field(schema, ['properties', 'video',
'_inputex'])
self.assertNotEqual(video_options.get('_type'), 'hidden')
def get_lesson_schema(course, lesson):
return resources_display.ResourceLesson.get_schema(
course, lesson.lesson_id).get_schema_dict()
def find_schema_field(schema, key):
for field, options in schema:
if field == key:
return options
| gpl-3.0 | 8,584,921,947,755,167,000 | 38.83043 | 80 | 0.624337 | false |
vitorio/hold-the-line | holdtheline.py | 1 | 7034 | ## hold-the-line - Simple Python voicemail and SMS/MMS receiver
## for holding onto phone numbers in Twilio
##
## Written in 2015 and 2016 and 2017 by Vitorio Miliano <http://vitor.io/>
##
## To the extent possible under law, the author has dedicated all
## copyright and related and neighboring rights to this software
## to the public domain worldwide. This software is distributed
## without any warranty.
##
## You should have received a copy of the CC0 Public Domain
## Dedication along with this software. If not, see
## <http://creativecommons.org/publicdomain/zero/1.0/>.
from flask import Flask, request, redirect, abort
import twilio.twiml
import twilio.rest
import twilio.util
import ConfigParser
import marrow.mailer
import sys
import json
import phonenumbers
config = ConfigParser.ConfigParser()
config.readfp(open('holdtheline.cfg'))
BLOCKED_NUMBERS = config.get('holdtheline', 'blocked_numbers').split(',')
CALL_REDIRECT = config.get('holdtheline', 'call_redirect')
BUTTON_SELECTION = config.get('holdtheline', 'button_selection')
BUTTON_REDIRECT = config.get('holdtheline', 'button_redirect')
BUTTONRETRY1_REDIRECT = config.get('holdtheline', 'buttonretry1_redirect')
BUTTONRETRY2_REDIRECT = config.get('holdtheline', 'buttonretry2_redirect')
BUTTONRETRY3_REDIRECT = config.get('holdtheline', 'buttonretry3_redirect')
TO_EMAIL = config.get('holdtheline', 'to_email')
FROM_EMAIL = config.get('holdtheline', 'from_email')
TEXT_SUBJECT = config.get('holdtheline', 'text_subject')
TEXT_AUTORESPONSE = config.get('holdtheline', 'text_autoresponse')
VOICEMAIL_SUBJECT = config.get('holdtheline', 'voicemail_subject')
TWILIO_ACCOUNT_SID = config.get('holdtheline', 'twilio_account_sid')
TWILIO_AUTH_TOKEN = config.get('holdtheline', 'twilio_auth_token')
twilioclient = twilio.rest.TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
validator = twilio.util.RequestValidator(TWILIO_AUTH_TOKEN)
mailer = marrow.mailer.Mailer(dict(transport = dict(config.items('marrow.mailer'))))
app = Flask(__name__)
def pass_number(number, addons):
"""Check number validity"""
passnum = True
try:
        marchex = json.loads(addons)  # AddOns is posted as a JSON-encoded string
if marchex['results']['marchex_cleancall']['result']['result']['recommendation'] == 'BLOCK':
passnum = False
except:
pass
try:
gp = phonenumbers.parse(number)
except:
passnum = False
else:
if phonenumbers.is_possible_number(gp) and phonenumbers.is_valid_number(gp):
if gp.country_code == 1:
gpi = int(str(gp.national_number)[phonenumbers.phonenumberutil.length_of_national_destination_code(gp):])
if gpi >= 5550100 and gpi <= 5550199:
passnum = False
else:
passnum = False
if number in BLOCKED_NUMBERS:
passnum = False
return passnum
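# Illustrative sketch, not part of the original file: how pass_number() is
# expected to behave for a few inputs (assuming neither number appears in
# BLOCKED_NUMBERS). The addons argument is the raw AddOns JSON string Twilio
# posts; passing None simply skips the Marchex CleanCall check.
#
#   pass_number('+12125550143', None)   # False: NANP 555-0100..0199 fictional range
#   pass_number('not a number', None)   # False: phonenumbers.parse() raises
#   # An ordinary valid subscriber number that is not blocked returns True.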
@app.route("/call", methods=['GET', 'POST'])
def handle_call():
"""Check number validity and redirect or reject"""
if not validator.validate(request.url, request.form, request.headers.get('X-Twilio-Signature', '')):
abort(403)
from_number = request.values.get('From', None)
addons = request.values.get('AddOns', None)
resp = twilio.twiml.Response()
if pass_number(from_number, addons):
resp.redirect(CALL_REDIRECT)
else:
resp.reject()
return str(resp)
@app.route("/text", methods=['GET', 'POST'])
def handle_text():
"""Check number validity and reject or send email"""
if not validator.validate(request.url, request.form, request.headers.get('X-Twilio-Signature', '')):
abort(403)
from_number = request.values.get('From', None)
addons = request.values.get('AddOns', None)
if pass_number(from_number, addons):
to_number = request.values.get('To', None)
sms_body = request.values.get('Body', None)
mms = request.values.get('NumMedia', None)
mail_text = u'''{} has a new text from {}.
{}
'''.format(to_number, from_number, sms_body)
if mms:
mms = int(mms)
if mms > 0:
for m in range(0, mms):
mc = request.values.get('MediaUrl{}'.format(m), None)
mail_text = mail_text + '''Media content: {}
'''.format(mc)
try:
mailer.start()
message = marrow.mailer.Message(author=FROM_EMAIL, to=TO_EMAIL)
message.subject = TEXT_SUBJECT.format(from_num=from_number, to_num=to_number)
message.plain = mail_text
mailer.send(message)
mailer.stop()
except:
e = sys.exc_info()
print 'A mailer error occurred: %s - %s' % (e[0], e[1])
raise
resp = twilio.twiml.Response()
if TEXT_AUTORESPONSE:
resp.message(unicode(TEXT_AUTORESPONSE, 'utf-8'))
return str(resp)
@app.route("/transcription", methods=['GET', 'POST'])
def handle_transcription():
"""Notify via email"""
if not validator.validate(request.url, request.form, request.headers.get('X-Twilio-Signature', '')):
abort(403)
from_number = request.values.get('From', None)
to_number = request.values.get('To', None)
voicemail = request.values.get('RecordingUrl', None)
transcript_status = request.values.get('TranscriptionStatus', None)
mail_text = u'''{} has a new voicemail from {}.
Recording: {}
'''.format(to_number, from_number, voicemail)
if (transcript_status == "completed"):
mail_text = mail_text + u"""Transcription:
{}
""".format(request.values.get('TranscriptionText', None))
try:
mailer.start()
message = marrow.mailer.Message(author=FROM_EMAIL, to=TO_EMAIL)
message.subject = VOICEMAIL_SUBJECT.format(from_num=from_number, to_num=to_number)
message.plain = mail_text
mailer.send(message)
mailer.stop()
except:
e = sys.exc_info()
print 'A mailer error occurred: %s - %s' % (e[0], e[1])
raise
resp = twilio.twiml.Response()
resp.hangup()
return str(resp)
@app.route('/button', methods=['GET', 'POST'])
def handle_button():
"""Route based on a single button selection"""
if not validator.validate(request.url, request.form, request.headers.get('X-Twilio-Signature', '')):
abort(403)
digit_pressed = request.values.get('Digits', None)
retry_time = request.values.get('Retry', None)
resp = twilio.twiml.Response()
if int(digit_pressed) == int(BUTTON_SELECTION):
resp.redirect(BUTTON_REDIRECT)
else:
if int(retry_time) == 1:
resp.redirect(BUTTONRETRY1_REDIRECT)
elif int(retry_time) == 2:
resp.redirect(BUTTONRETRY2_REDIRECT)
else:
resp.redirect(BUTTONRETRY3_REDIRECT)
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=6600)
| cc0-1.0 | -6,082,186,472,180,485,000 | 33.480392 | 121 | 0.632926 | false |
nburn42/tensorflow | tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py | 32 | 56938 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models in TensorFlow.
Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good
results on a number of tasks, such as speech recognition, parsing, machine
translation, or even constructing automated replies to emails.
Before using this module, it is recommended to read the TensorFlow tutorial
on sequence-to-sequence models. It explains the basic concepts of this module
and shows an end-to-end example of how to build a translation model.
https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html
Here is an overview of functions available in this module. They all use
a very similar interface, so after reading the above tutorial and using
one of them, others should be easy to substitute.
* Full sequence-to-sequence models.
- basic_rnn_seq2seq: The most basic RNN-RNN model.
- tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
- embedding_rnn_seq2seq: The basic model with input embedding.
- embedding_tied_rnn_seq2seq: The tied model with input embedding.
- embedding_attention_seq2seq: Advanced model with input embedding and
the neural attention mechanism; recommended for complex tasks.
* Multi-task sequence-to-sequence models.
- one2many_rnn_seq2seq: The embedding model with multiple decoders.
* Decoders (when you write your own encoder, you can use these to decode;
e.g., if you want to write a model that generates captions for images).
- rnn_decoder: The basic decoder based on a pure RNN.
- attention_decoder: A decoder that uses the attention mechanism.
* Losses.
- sequence_loss: Loss for a sequence model returning average log-perplexity.
- sequence_loss_by_example: As above, but not averaging over all examples.
* model_with_buckets: A convenience function to create models with bucketing
(see the tutorial above for an explanation of why and how to use it).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# TODO(ebrevdo): Remove once _linear is fully deprecated.
Linear = core_rnn_cell._Linear # pylint: disable=protected-access,invalid-name
def _extract_argmax_and_embed(embedding,
output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
if output_projection is not None:
prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev
return loop_function
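# Illustrative sketch, not part of the upstream file: the function returned by
# _extract_argmax_and_embed is what rnn_decoder (below) calls between steps when
# decoding greedily. `vocab_size` and `emb_dim` are arbitrary example values.
#
#   embedding = variable_scope.get_variable("embedding", [vocab_size, emb_dim])
#   loop_fn = _extract_argmax_and_embed(embedding)
#   # At step i the decoder feeds back the embedding of the argmax token:
#   #   next_input = loop_fn(prev_logits, i)    # shape [batch_size x emb_dim]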
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with variable_scope.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
def basic_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
dtype=dtypes.float32,
scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector,
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
enc_cell = copy.deepcopy(cell)
_, enc_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_state, cell)
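# Illustrative usage sketch, not part of the upstream file. The shapes, sizes
# and the GRU cell are arbitrary choices for the example; `tf` is the usual
# top-level TensorFlow 1.x import.
#
#   batch_size, input_size, steps = 4, 8, 5
#   enc_inp = [tf.placeholder(tf.float32, [batch_size, input_size])
#              for _ in range(steps)]
#   dec_inp = [tf.placeholder(tf.float32, [batch_size, input_size])
#              for _ in range(steps)]
#   cell = tf.nn.rnn_cell.GRUCell(16)
#   outputs, state = tf.contrib.legacy_seq2seq.basic_rnn_seq2seq(
#       enc_inp, dec_inp, cell)
#   # `outputs` is a list of `steps` Tensors of shape [batch_size x 16];
#   # `state` is the final decoder state of shape [batch_size x 16].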
def tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
loop_function=None,
dtype=dtypes.float32,
scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
    loop_function: If not None, this function will be applied to the i-th output
      in order to generate the (i+1)-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_state = rnn.static_rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
variable_scope.get_variable_scope().reuse_variables()
return rnn_decoder(
decoder_inputs,
enc_state,
cell,
loop_function=loop_function,
scope=scope)
def embedding_rnn_decoder(decoder_inputs,
initial_state,
cell,
num_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(scope or "embedding_rnn_decoder") as scope:
if output_projection is not None:
dtype = scope.dtype
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = (embedding_ops.embedding_lookup(embedding, i)
for i in decoder_inputs)
return rnn_decoder(
emb_inp, initial_state, cell, loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors. The
output is of shape [batch_size x cell.output_size] when
output_projection is not None (and represents the dense representation
of predicted tokens). It is of shape [batch_size x num_decoder_symbols]
when output_projection is None.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq") as scope:
if dtype is not None:
scope.set_dtype(dtype)
else:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
cell,
num_decoder_symbols,
embedding_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
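# Illustrative usage sketch, not part of the upstream file: token-id inputs
# instead of dense vectors. Vocabulary sizes and dimensions are arbitrary
# example values; `tf` is the usual TensorFlow 1.x import.
#
#   enc_inp = [tf.placeholder(tf.int32, [None]) for _ in range(5)]
#   dec_inp = [tf.placeholder(tf.int32, [None]) for _ in range(5)]
#   cell = tf.nn.rnn_cell.GRUCell(32)
#   outputs, state = tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(
#       enc_inp, dec_inp, cell,
#       num_encoder_symbols=1000, num_decoder_symbols=1000,
#       embedding_size=32, feed_previous=False)
#   # With output_projection=None an OutputProjectionWrapper is added, so each
#   # element of `outputs` has shape [batch_size x num_decoder_symbols].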
def embedding_tied_rnn_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_symbols,
embedding_size,
num_decoder_symbols=None,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs. The decoder output is over symbols
from 0 to num_decoder_symbols - 1 if num_decoder_symbols is none; otherwise it
is over 0 to num_symbols - 1.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
num_symbols: Integer; number of symbols for both encoder and decoder.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_decoder_symbols: Integer; number of output symbols for decoder. If
provided, the decoder output is over symbols 0 to num_decoder_symbols - 1.
Otherwise, decoder output is over symbols 0 to num_symbols - 1. Note that
this assumes that the vocabulary is set up such that the first
num_decoder_symbols of num_symbols are part of decoding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_symbols] containing the generated
outputs where output_symbols = num_decoder_symbols if
num_decoder_symbols is not None otherwise output_symbols = num_symbols.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
with variable_scope.variable_scope(
scope or "embedding_tied_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
embedding = variable_scope.get_variable(
"embedding", [num_symbols, embedding_size], dtype=dtype)
emb_encoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in encoder_inputs
]
emb_decoder_inputs = [
embedding_ops.embedding_lookup(embedding, x) for x in decoder_inputs
]
output_symbols = num_symbols
if num_decoder_symbols is not None:
output_symbols = num_decoder_symbols
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, output_symbols)
if isinstance(feed_previous, bool):
loop_function = _extract_argmax_and_embed(embedding, output_projection,
True) if feed_previous else None
return tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, False) if feed_previous_bool else None
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = tied_rnn_seq2seq(
emb_encoder_inputs,
emb_decoder_inputs,
cell,
loop_function=loop_function,
dtype=dtype)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
    # Calculate zero-state to know its structure.
static_batch_size = encoder_inputs[0].get_shape()[0]
for inp in encoder_inputs[1:]:
static_batch_size.merge_with(inp.get_shape()[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(encoder_inputs[0])[0]
zero_state = cell.zero_state(batch_size, dtype)
if nest.is_sequence(zero_state):
state = nest.pack_sequence_as(
structure=zero_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
def attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
output_size=None,
num_heads=1,
loop_function=None,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
    loop_function: If not None, this function will be applied to the i-th output
      in order to generate the (i+1)-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
state: The state of each decoder cell the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, shapes
of attention_states are not set, or input size cannot be inferred
from the input.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if attention_states.get_shape()[2].value is None:
raise ValueError("Shape[2] of attention_states must be known: %s" %
attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(
scope or "attention_decoder", dtype=dtype) as scope:
dtype = scope.dtype
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
if attn_length is None:
attn_length = array_ops.shape(attention_states)[1]
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(attention_states,
[-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = variable_scope.get_variable("AttnW_%d" % a,
[1, 1, attn_size, attention_vec_size])
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(
variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size]))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
if nest.is_sequence(query): # If the query is a tuple, flatten it.
query_list = nest.flatten(query)
for q in query_list: # Check that ndims == 2 if specified.
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = Linear(query, attention_vec_size, True)(query)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
[2, 3])
a = nn_ops.softmax(s)
# Now calculate the attention-weighted vector d.
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [
array_ops.zeros(
batch_attn_size, dtype=dtype) for _ in xrange(num_heads)
]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
if initial_state_attention:
attns = attention(initial_state)
for i, inp in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
# Merge input and previous attentions into one vector of the right size.
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from input: %s" % inp.name)
inputs = [inp] + attns
x = Linear(inputs, input_size, True)(inputs)
# Run the RNN.
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
attns = attention(state)
else:
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
inputs = [cell_output] + attns
output = Linear(inputs, output_size, True)(inputs)
if loop_function is not None:
prev = output
outputs.append(output)
return outputs, state
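# Illustrative sketch, not part of the upstream file: building the
# `attention_states` tensor from per-step encoder outputs before calling
# attention_decoder. Cell sizes are arbitrary; `enc_inp`/`dec_inp` are lists of
# [batch_size x input_size] Tensors as in the earlier examples.
#
#   enc_cell = tf.nn.rnn_cell.GRUCell(64)
#   dec_cell = tf.nn.rnn_cell.GRUCell(64)
#   enc_outputs, enc_state = tf.nn.static_rnn(enc_cell, enc_inp,
#                                             dtype=tf.float32)
#   top_states = [tf.reshape(o, [-1, 1, enc_cell.output_size])
#                 for o in enc_outputs]
#   attention_states = tf.concat(top_states, 1)  # [batch x attn_length x attn_size]
#   outputs, state = tf.contrib.legacy_seq2seq.attention_decoder(
#       dec_inp, enc_state, attention_states, dec_cell)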
def embedding_attention_decoder(decoder_inputs,
initial_state,
attention_states,
cell,
num_symbols,
embedding_size,
num_heads=1,
output_size=None,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=None,
scope=None,
initial_state_attention=False):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
    output_size: Size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(
scope or "embedding_attention_decoder", dtype=dtype) as scope:
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = [
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs
]
return attention_decoder(
emb_inp,
initial_state,
attention_states,
cell,
output_size=output_size,
num_heads=num_heads,
loop_function=loop_function,
initial_state_attention=initial_state_attention)
def embedding_attention_seq2seq(encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
num_heads=1,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None,
initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
  Warning: when output_projection is None, the size of the attention vectors
  and variables will be made proportional to num_decoder_symbols, which can be
  large.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(
scope or "embedding_attention_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
encoder_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
encoder_outputs, encoder_state = rnn.static_rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in encoder_outputs
]
attention_states = array_ops.concat(top_states, 1)
# Decoder.
output_size = None
if output_projection is None:
cell = core_rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs,
encoder_state,
attention_states,
cell,
num_decoder_symbols,
embedding_size,
num_heads=num_heads,
output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
outputs_len = len(decoder_inputs) # Outputs length same as decoder inputs.
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
return outputs_and_state[:outputs_len], state
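# Illustrative usage sketch (not part of the original library): one possible
# way to wire embedding_attention_seq2seq to placeholder inputs. Every size,
# the 1000-symbol vocabulary and the GRUCell choice below are assumptions made
# only for demonstration; the helper is never called at import time.
def _example_embedding_attention_seq2seq():
  """Builds a tiny attention seq2seq graph over placeholder inputs."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  encoder_inputs = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(10)
  ]
  decoder_inputs = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(12)
  ]
  cell = rnn_cell_impl.GRUCell(64)
  outputs, state = embedding_attention_seq2seq(
      encoder_inputs,
      decoder_inputs,
      cell,
      num_encoder_symbols=1000,
      num_decoder_symbols=1000,
      embedding_size=64,
      feed_previous=False)
  # outputs is a list of 12 Tensors of shape [batch_size x 1000]; state is the
  # final decoder cell state.
  return outputs, state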
def one2many_rnn_seq2seq(encoder_inputs,
decoder_inputs_dict,
enc_cell,
dec_cells_dict,
num_encoder_symbols,
num_decoder_symbols_dict,
embedding_size,
feed_previous=False,
dtype=None,
scope=None):
"""One-to-many RNN sequence-to-sequence model (multi-task).
This is a multi-task sequence-to-sequence model with one encoder and multiple
decoders. Reference to multi-task sequence-to-sequence learning can be found
here: http://arxiv.org/abs/1511.06114
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs_dict: A dictionary mapping decoder name (string) to
the corresponding decoder_inputs; each decoder_inputs is a list of 1D
Tensors of shape [batch_size]; num_decoders is defined as
len(decoder_inputs_dict).
enc_cell: tf.nn.rnn_cell.RNNCell defining the encoder cell function and
size.
    dec_cells_dict: A dictionary mapping decoder name (string) to an
instance of tf.nn.rnn_cell.RNNCell.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an
integer specifying number of symbols for the corresponding decoder;
len(num_decoder_symbols_dict) must be equal to num_decoders.
embedding_size: Integer, the length of the embedding vector for each symbol.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"one2many_rnn_seq2seq"
Returns:
A tuple of the form (outputs_dict, state_dict), where:
outputs_dict: A mapping from decoder name (string) to a list of the same
length as decoder_inputs_dict[name]; each element in the list is a 2D
Tensors with shape [batch_size x num_decoder_symbol_list[name]]
containing the generated outputs.
state_dict: A mapping from decoder name (string) to the final state of the
corresponding decoder RNN; it is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
TypeError: if enc_cell or any of the dec_cells are not instances of RNNCell.
ValueError: if len(dec_cells) != len(decoder_inputs_dict).
"""
outputs_dict = {}
state_dict = {}
if not isinstance(enc_cell, rnn_cell_impl.RNNCell):
raise TypeError("enc_cell is not an RNNCell: %s" % type(enc_cell))
if set(dec_cells_dict) != set(decoder_inputs_dict):
raise ValueError("keys of dec_cells_dict != keys of decodre_inputs_dict")
for dec_cell in dec_cells_dict.values():
if not isinstance(dec_cell, rnn_cell_impl.RNNCell):
raise TypeError("dec_cell is not an RNNCell: %s" % type(dec_cell))
with variable_scope.variable_scope(
scope or "one2many_rnn_seq2seq", dtype=dtype) as scope:
dtype = scope.dtype
# Encoder.
enc_cell = core_rnn_cell.EmbeddingWrapper(
enc_cell,
embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.static_rnn(enc_cell, encoder_inputs, dtype=dtype)
# Decoder.
for name, decoder_inputs in decoder_inputs_dict.items():
num_decoder_symbols = num_decoder_symbols_dict[name]
dec_cell = dec_cells_dict[name]
with variable_scope.variable_scope("one2many_decoder_" + str(
name)) as scope:
dec_cell = core_rnn_cell.OutputProjectionWrapper(
dec_cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
else:
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def filled_embedding_rnn_decoder(feed_previous):
"""The current decoder with a fixed feed_previous parameter."""
# pylint: disable=cell-var-from-loop
reuse = None if feed_previous else True
vs = variable_scope.get_variable_scope()
with variable_scope.variable_scope(vs, reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs,
encoder_state,
dec_cell,
num_decoder_symbols,
embedding_size,
feed_previous=feed_previous)
# pylint: enable=cell-var-from-loop
state_list = [state]
if nest.is_sequence(state):
state_list = nest.flatten(state)
return outputs + state_list
outputs_and_state = control_flow_ops.cond(
feed_previous, lambda: filled_embedding_rnn_decoder(True),
lambda: filled_embedding_rnn_decoder(False))
# Outputs length is the same as for decoder inputs.
outputs_len = len(decoder_inputs)
outputs = outputs_and_state[:outputs_len]
state_list = outputs_and_state[outputs_len:]
state = state_list[0]
if nest.is_sequence(encoder_state):
state = nest.pack_sequence_as(
structure=encoder_state, flat_sequence=state_list)
outputs_dict[name] = outputs
state_dict[name] = state
return outputs_dict, state_dict
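# Illustrative usage sketch (not part of the original library): one encoder
# feeding two decoders, e.g. a multi-task translation setup. The task names
# ("fr", "de") and all sizes are assumptions made only for demonstration.
def _example_one2many_rnn_seq2seq():
  """Builds a small one-to-many graph over placeholder inputs."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  encoder_inputs = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(8)
  ]
  decoder_inputs_dict = {
      "fr": [array_ops.placeholder(dtypes.int32, shape=[None])
             for _ in range(9)],
      "de": [array_ops.placeholder(dtypes.int32, shape=[None])
             for _ in range(9)],
  }
  dec_cells_dict = {"fr": rnn_cell_impl.GRUCell(32),
                    "de": rnn_cell_impl.GRUCell(32)}
  outputs_dict, state_dict = one2many_rnn_seq2seq(
      encoder_inputs,
      decoder_inputs_dict,
      rnn_cell_impl.GRUCell(32),
      dec_cells_dict,
      num_encoder_symbols=500,
      num_decoder_symbols_dict={"fr": 400, "de": 450},
      embedding_size=32)
  # outputs_dict["fr"] is a list of 9 Tensors shaped [batch_size x 400].
  return outputs_dict, state_dict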
def sequence_loss_by_example(logits,
targets,
weights,
average_across_timesteps=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (labels, logits) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
**Note that to avoid confusion, it is required for the function to accept
named arguments.**
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.name_scope(name, "sequence_loss_by_example",
logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
# TODO(irving,ebrevdo): This reshape is needed because
# sequence_loss_by_example is called with scalars sometimes, which
# violates our general scalar strictness policy.
target = array_ops.reshape(target, [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=logit)
else:
crossent = softmax_loss_function(labels=target, logits=logit)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
def sequence_loss(logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True,
softmax_loss_function=None,
name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (labels, logits) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
**Note that to avoid confusion, it is required for the function to accept
named arguments.**
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.name_scope(name, "sequence_loss", logits + targets + weights):
cost = math_ops.reduce_sum(
sequence_loss_by_example(
logits,
targets,
weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, cost.dtype)
else:
return cost
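# Illustrative usage sketch (not part of the original library): the averaged
# per-symbol cross-entropy for a 3-step decoder. All shapes are assumptions
# made only for demonstration.
def _example_sequence_loss():
  """Builds a scalar sequence loss over placeholder logits and targets."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  num_symbols = 20
  logits = [
      array_ops.placeholder(dtypes.float32, shape=[None, num_symbols])
      for _ in range(3)
  ]
  targets = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(3)
  ]
  # A weight of 0.0 masks out padding positions; 1.0 keeps real symbols.
  weights = [
      array_ops.placeholder(dtypes.float32, shape=[None]) for _ in range(3)
  ]
  return sequence_loss(logits, targets, weights)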
def model_with_buckets(encoder_inputs,
decoder_inputs,
targets,
weights,
buckets,
seq2seq,
softmax_loss_function=None,
per_example_loss=False,
name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(
x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes 2 inputs that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: Function (labels, logits) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
**Note that to avoid confusion, it is required for the function to accept
named arguments.**
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be
a scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
Returns:
A tuple of the form (outputs, losses), where:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors. The shape of output tensors can be either
[batch_size x output_size] or [batch_size x num_decoder_symbols]
depending on the seq2seq model used.
losses: List of scalar Tensors, representing losses for each bucket, or,
if per_example_loss is set, a list of 1D batch-sized float Tensors.
Raises:
ValueError: If length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last "
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last "
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with ops.name_scope(name, "model_with_buckets", all_inputs):
for j, bucket in enumerate(buckets):
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True if j > 0 else None):
bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
decoder_inputs[:bucket[1]])
outputs.append(bucket_outputs)
if per_example_loss:
losses.append(
sequence_loss_by_example(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
else:
losses.append(
sequence_loss(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
return outputs, losses
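# Illustrative usage sketch (not part of the original library): two buckets and
# an attention seq2seq wrapped in a lambda, mirroring the pattern given in the
# docstring above. All sizes are assumptions made only for demonstration.
def _example_model_with_buckets():
  """Builds bucketed outputs and losses over placeholder inputs."""
  from tensorflow.python.framework import dtypes  # local import for the sketch
  buckets = [(5, 6), (10, 11)]
  max_enc, max_dec = buckets[-1]
  encoder_inputs = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(max_enc)
  ]
  decoder_inputs = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(max_dec)
  ]
  targets = [
      array_ops.placeholder(dtypes.int32, shape=[None]) for _ in range(max_dec)
  ]
  weights = [
      array_ops.placeholder(dtypes.float32, shape=[None]) for _ in range(max_dec)
  ]
  seq2seq_f = lambda x, y: embedding_attention_seq2seq(
      x, y, rnn_cell_impl.GRUCell(32), num_encoder_symbols=100,
      num_decoder_symbols=100, embedding_size=32)
  return model_with_buckets(
      encoder_inputs, decoder_inputs, targets, weights, buckets, seq2seq_f)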
| apache-2.0 | 5,504,938,355,987,907,000 | 45.442088 | 80 | 0.652148 | false |
Fale/ansible | lib/ansible/module_utils/connection.py | 35 | 8113 | #
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import hashlib
import json
import socket
import struct
import traceback
import uuid
from functools import partial
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import cPickle
def write_to_file_descriptor(fd, obj):
"""Handles making sure all data is properly written to file descriptor fd.
In particular, that data is encoded in a character stream-friendly way and
that all data gets written before returning.
"""
# Need to force a protocol that is compatible with both py2 and py3.
# That would be protocol=2 or less.
# Also need to force a protocol that excludes certain control chars as
# stdin in this case is a pty and control chars will cause problems.
# that means only protocol=0 will work.
src = cPickle.dumps(obj, protocol=0)
# raw \r characters will not survive pty round-trip
# They should be rehydrated on the receiving end
src = src.replace(b'\r', br'\r')
data_hash = to_bytes(hashlib.sha1(src).hexdigest())
os.write(fd, b'%d\n' % len(src))
os.write(fd, src)
os.write(fd, b'%s\n' % data_hash)
def send_data(s, data):
packed_len = struct.pack('!Q', len(data))
return s.sendall(packed_len + data)
def recv_data(s):
header_len = 8 # size of a packed unsigned long long
data = to_bytes("")
while len(data) < header_len:
d = s.recv(header_len - len(data))
if not d:
return None
data += d
data_len = struct.unpack('!Q', data[:header_len])[0]
data = data[header_len:]
while len(data) < data_len:
d = s.recv(data_len - len(data))
if not d:
return None
data += d
return data
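# Illustrative example (not part of the original module): send_data/recv_data
# implement a simple framing protocol -- an 8-byte big-endian unsigned length
# followed by the payload. The sketch below round-trips one message over a
# local socket pair; socket.socketpair() is only available on POSIX platforms.
def _example_framing_roundtrip():
    left, right = socket.socketpair()
    try:
        send_data(left, to_bytes('{"jsonrpc": "2.0"}'))
        received = recv_data(right)  # b'{"jsonrpc": "2.0"}'
    finally:
        left.close()
        right.close()
    return received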
def exec_command(module, command):
connection = Connection(module._socket_path)
try:
out = connection.exec_command(command)
except ConnectionError as exc:
code = getattr(exc, 'code', 1)
message = getattr(exc, 'err', exc)
return code, '', to_text(message, errors='surrogate_then_replace')
return 0, out, ''
def request_builder(method_, *args, **kwargs):
reqid = str(uuid.uuid4())
req = {'jsonrpc': '2.0', 'method': method_, 'id': reqid}
req['params'] = (args, kwargs)
return req
class ConnectionError(Exception):
def __init__(self, message, *args, **kwargs):
super(ConnectionError, self).__init__(message)
for k, v in iteritems(kwargs):
setattr(self, k, v)
class Connection(object):
def __init__(self, socket_path):
if socket_path is None:
raise AssertionError('socket_path must be a value')
self.socket_path = socket_path
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if name.startswith('_'):
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
return partial(self.__rpc__, name)
def _exec_jsonrpc(self, name, *args, **kwargs):
req = request_builder(name, *args, **kwargs)
reqid = req['id']
if not os.path.exists(self.socket_path):
raise ConnectionError(
'socket path %s does not exist or cannot be found. See Troubleshooting socket '
'path issues in the Network Debug and Troubleshooting Guide' % self.socket_path
)
try:
data = json.dumps(req, cls=AnsibleJSONEncoder)
except TypeError as exc:
raise ConnectionError(
"Failed to encode some variables as JSON for communication with ansible-connection. "
"The original exception was: %s" % to_text(exc)
)
try:
out = self.send(data)
except socket.error as e:
raise ConnectionError(
'unable to connect to socket %s. See Troubleshooting socket path issues '
'in the Network Debug and Troubleshooting Guide' % self.socket_path,
err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
)
try:
response = json.loads(out)
except ValueError:
params = [repr(arg) for arg in args] + ['{0}={1!r}'.format(k, v) for k, v in iteritems(kwargs)]
params = ', '.join(params)
raise ConnectionError(
"Unable to decode JSON from response to {0}({1}). Received '{2}'.".format(name, params, out)
)
if response['id'] != reqid:
raise ConnectionError('invalid json-rpc id received')
if "result_type" in response:
response["result"] = cPickle.loads(to_bytes(response["result"]))
return response
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
from remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
For usage refer the respective connection plugin docs.
"""
response = self._exec_jsonrpc(name, *args, **kwargs)
if 'error' in response:
err = response.get('error')
msg = err.get('data') or err['message']
code = err['code']
raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code)
return response['result']
def send(self, data):
try:
sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sf.connect(self.socket_path)
send_data(sf, to_bytes(data))
response = recv_data(sf)
except socket.error as e:
sf.close()
raise ConnectionError(
'unable to connect to socket %s. See the socket path issue category in '
'Network Debug and Troubleshooting Guide' % self.socket_path,
err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
)
sf.close()
return to_text(response, errors='surrogate_or_strict')
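# Illustrative example (not part of the original module): a module would
# normally read the socket path from AnsibleModule._socket_path and then call
# arbitrary JSON-RPC methods through __getattr__; get_capabilities() below is
# just one example of such a proxied method.
def _example_connection_usage(module):
    connection = Connection(module._socket_path)
    try:
        return connection.get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))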
| gpl-3.0 | -891,343,664,207,556,100 | 36.387097 | 108 | 0.639961 | false |
alvin319/CarnotKE | jyhton/lib-python/2.7/os.py | 147 | 25769 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
| apache-2.0 | -6,478,401,462,278,475,000 | 33.822973 | 83 | 0.597617 | false |
CryptArc/bitcoinxt | contrib/devtools/symbol-check.py | 172 | 4344 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function
import subprocess
import re
import sys
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '_init', '__bss_start', '_fini'
}
READELF_CMD = '/usr/bin/readelf'
CPPFILT_CMD = '/usr/bin/c++filt'
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split('\n'):
line = line.split()
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
retval = 1
exit(retval)
| mit | 4,517,823,212,617,634,300 | 35.504202 | 142 | 0.641344 | false |
rynomad/CCNx-Federated-Wiki-Prototype | server/express/node_modules/npm/node_modules/node-gyp/gyp/test/cflags/gyptest-cflags.py | 74 | 1559 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the build of an executable with compiler flags taken from the CFLAGS
environment variable, and the use of the environment during regeneration when
the gyp file changes.
"""
import os
import TestGyp
env_stack = []
def PushEnv():
env_copy = os.environ.copy()
env_stack.append(env_copy)
def PopEnv():
  os.environ = env_stack.pop()
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators.
test = TestGyp.TestGyp(formats=['make', 'android'])
try:
PushEnv()
os.environ['CFLAGS'] = '-O0'
test.run_gyp('cflags.gyp')
finally:
  # Restore the environment after calling gyp. When the auto-regeneration
  # happens, the same CFLAGS value should be reused anyway.
PopEnv()
test.build('cflags.gyp')
expect = """\
Using no optimization flag
"""
test.run_built_executable('cflags', stdout=expect)
test.sleep()
try:
PushEnv()
os.environ['CFLAGS'] = '-O2'
test.run_gyp('cflags.gyp')
finally:
  # Restore the environment after calling gyp. When the auto-regeneration
  # happens, the same CFLAGS value should be reused anyway.
PopEnv()
test.build('cflags.gyp')
expect = """\
Using an optimization flag
"""
test.run_built_executable('cflags', stdout=expect)
test.pass_test()
| mit | -7,045,152,750,854,276,000 | 22.984615 | 80 | 0.717126 | false |
TrondKjeldas/knxmonitor | knxmonitor/Knx/KnxParser.py | 1 | 5381 | from time import time, mktime, strptime
try:
import Gnuplot
except ImportError:
print "Warning: gnuplot not available"
from knxmonitor.Knx.KnxPdu import KnxPdu
from knxmonitor.Knx.KnxAddressStream import KnxAddressStream
from knxmonitor.Knx.KnxAddressCollection import KnxAddressCollection
verbose = True
def printVerbose(msg):
    if verbose:
        print msg
def setVerbose(v):
global verbose
verbose = v
class KnxParser(object):
devDict = KnxAddressCollection()
groupDict = KnxAddressCollection()
knxAddrStream = {}
def __init__(self, devicesfilename,
groupaddrfilename, dumpaddressinfo = False,
flanksOnly = False, types = None):
# Load device and address info
self.groupDict.loadGroupAddrs(open(groupaddrfilename), dumpaddressinfo)
# Populate streams dictionary
for k in self.groupDict.keys():
if k in types.keys():
t = types[k]
else:
t = None
self.knxAddrStream[k] = KnxAddressStream(k, self.groupDict[k],
t, flanksOnly)
self.knxAddrStream[k].prepareSynchronizedPrints()
self.devDict.loadDeviceAddrs(open(devicesfilename))
self.cache = []
def setTimeBase(self, basetime):
self.basetime = basetime
def parseVbusOutput(self, seq, timestamp, text):
# Skip programming related PDUs...
if text.find("Data system") != -1:
return
pdu = KnxPdu(self.devDict, self.groupDict, text)
tstamp = strptime(timestamp, "%a %b %d %H:%M:%S %Y")
try:
self.knxAddrStream[pdu.getTo()].addTelegram(seq, tstamp, pdu)
# Also add PDU to cache stream...
self.cache.append((seq, tstamp, pdu))
except KeyError:
printVerbose("unknown address, skipping: %s" %pdu.getTo())
def storeCachedInput(self, file, startline):
for seq, tstamp, pdu in self.cache:
pdu.storeCacheLine(tstamp, file)
print "Done storeing cache file %s" %file.name
file.close()
    def getStreamMinMaxValues(self, groupAddr):
        try:
            minVal = self.knxAddrStream[groupAddr].minVal
            maxVal = self.knxAddrStream[groupAddr].maxVal
            return minVal, maxVal
        except (KeyError, AttributeError):
            return None, None
def printStreams(self, groupAddrs, format = "text"):
if groupAddrs == None:
# Logically, "none" means all :)
groupAddrs = self.knxAddrStream.keys()
for g in groupAddrs:
self.knxAddrStream[g].setOutputFormat(format)
self.knxAddrStream[g].prepareSynchronizedPrints()
seq = 0
more = True
while more:
more = False
for g in groupAddrs:
hasMore = self.knxAddrStream[g].printTelegrams(seq)
more = more or hasMore
# Step sequence number
seq += 1
def plotStreams(self, groupAddrs, genImage="", addHorLine=None):
if groupAddrs == None:
# Logically, "none" means all :)
groupAddrs = self.knxAddrStream.keys()
plotter = {}
gdata = []
plotData = None
endTime = 0.0
startTime = time() + (3600*24*365*10)
for ga in groupAddrs:
try:
plotData = self.knxAddrStream[ga].preparePlotData(self.basetime)
except KeyError:
# Not a valid group address, skip it...
continue
if len(plotData["data"]) > 0:
st, tmp = plotData["data"][0]
et, tmp = plotData["data"][-1]
if st < startTime:
startTime = st
if et > endTime:
endTime = et
kwarg = { "using" : plotData["params"],
"title" : plotData["title"].encode("utf-8"),
"with" : plotData["style"] + plotData["smoothing"] }
gdata.append(Gnuplot.Data( plotData["data"], **kwarg ))
        # Add a horizontal line, if requested
if plotData != None and addHorLine != None:
try:
dummy = iter(addHorLine)
except TypeError:
addHorLine = [addHorLine]
for hl in addHorLine:
kwarg = { "using" : "1:2",
"title" : "horisontal line at %s" %hl,
"with" : "linespoints smooth unique" }
gdata.append(Gnuplot.Data( [ [startTime, hl],
[endTime, hl] ], **kwarg ))
plotter = Gnuplot.Gnuplot(debug=1)
plotter('set xdata time')
plotter('set timefmt "%s"')
plotter('set format x "%d/%m"')
plotter('set grid')
#plotter('set style fill solid')
plotter('set key bottom left')
if len(gdata) < 1:
print "No data.."
return
plotter('set terminal x11')
plotter.plot(gdata[0])
for g in gdata[1:]:
plotter.replot(g)
if genImage != "":
plotter('set terminal png')
plotter('set output "%s"' %genImage)
plotter.replot()
else:
raw_input('Please press return to exit...\n')
| gpl-2.0 | 6,558,801,920,558,969,000 | 28.404372 | 80 | 0.537447 | false |
rrrene/django | tests/forms_tests/widget_tests/test_nullbooleanselect.py | 179 | 2142 | from django.forms import NullBooleanSelect
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class NullBooleanSelectTest(WidgetTest):
widget = NullBooleanSelect()
def test_render_true(self):
self.check_html(self.widget, 'is_cool', True, html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>"""
))
def test_render_false(self):
self.check_html(self.widget, 'is_cool', False, html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>"""
))
def test_render_none(self):
self.check_html(self.widget, 'is_cool', None, html=(
"""<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>"""
))
def test_render_value(self):
self.check_html(self.widget, 'is_cool', '2', html=(
"""<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>"""
))
@override_settings(USE_L10N=True)
def test_l10n(self):
"""
Ensure that the NullBooleanSelect widget's options are lazily
localized (#17190).
"""
widget = NullBooleanSelect()
with translation.override('de-at'):
self.check_html(widget, 'id_bool', True, html=(
"""
<select name="id_bool">
<option value="1">Unbekannt</option>
<option value="2" selected="selected">Ja</option>
<option value="3">Nein</option>
</select>
"""
))
| bsd-3-clause | 7,998,247,293,956,354,000 | 32.46875 | 69 | 0.530345 | false |
palerdot/calibre | src/calibre/gui2/store/stores/waterstones_uk_plugin.py | 7 | 3469 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class WaterstonesUKStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://www.awin1.com/awclick.php?mid=3787&id=120917'
url_details = 'http://www.awin1.com/cread.php?awinmid=3787&awinaffid=120917&clickref=&p={0}'
if external or self.config.get('open_external', False):
if detail_item:
url = url_details.format(detail_item)
open_url(QUrl(url))
else:
detail_url = None
if detail_item:
detail_url = url_details.format(detail_item)
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.waterstones.com/waterstonesweb/simpleSearch.do?simpleSearchString=ebook+' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//div[contains(@class, "results-pane")]'):
if counter <= 0:
break
id = ''.join(data.xpath('./div/div/h2/a/@href')).strip()
if not id:
continue
cover_url = ''.join(data.xpath('.//div[@class="image"]/a/img/@src'))
if not cover_url.startswith("http"):
cover_url = 'http://www.waterstones.com' + cover_url
title = ''.join(data.xpath('./div/div/h2/a/text()'))
author = ', '.join(data.xpath('.//p[@class="byAuthor"]/a/text()'))
price = ''.join(data.xpath('.//p[@class="price"]/span[@class="priceRed2"]/text()'))
drm = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "DRM")])')
pdf = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "PDF")])')
epub = data.xpath('boolean(.//td[@headers="productFormat" and contains(., "EPUB")])')
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
if drm:
s.drm = SearchResult.DRM_LOCKED
else:
s.drm = SearchResult.DRM_UNKNOWN
s.detail_item = id
formats = []
if epub:
formats.append('ePub')
if pdf:
formats.append('PDF')
s.formats = ', '.join(formats)
yield s
| gpl-3.0 | -1,608,507,348,840,372,000 | 38.873563 | 122 | 0.557798 | false |
mikkokeskinen/tunnistamo | auth_backends/adfs/helsinki_library_asko.py | 1 | 3123 | import uuid
from auth_backends.adfs.base import BaseADFS
class HelsinkiLibraryAskoADFS(BaseADFS):
"""Helsinki Libraries' ASKO ADFS authentication backend"""
name = 'helsinki_library_asko_adfs'
AUTHORIZATION_URL = 'https://askofs.lib.hel.fi/adfs/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://askofs.lib.hel.fi/adfs/oauth2/token'
resource = 'https://api.hel.fi/sso/asko_adfs'
domain_uuid = uuid.UUID('5bf9cda1-7a62-47ca-92c1-824650f58467')
realm = 'helsinki_asko'
cert = ('MIIC2jCCAcKgAwIBAgIQJ9GFZkQxN7BE/s9i5wpdmDANBgkqhkiG9w0BAQsFAD'
'ApMScwJQYDVQQDEx5BREZTIFNpZ25pbmcgLSBhZGZzLmFza28ubG9jYWwwHhcN'
'MTgwOTI4MDc1MzExWhcNMTkwOTI4MDc1MzExWjApMScwJQYDVQQDEx5BREZTIF'
'NpZ25pbmcgLSBhZGZzLmFza28ubG9jYWwwggEiMA0GCSqGSIb3DQEBAQUAA4IB'
'DwAwggEKAoIBAQCwHRbQsLaENU9Ed08gTKwm5oOIwRaksl+MzwQ+ydi2BRVfhf'
'RC257VeB3IlWmzENFIxcrpiL1xtsAOOjVWJbCVlU7PcjRu8zn9+B8sdO+9k/g/'
'vI44Ho/EMGbg1odQNDkzDCWhTfEA38cJHCxA8CTi2r2nspPPAl+C7dn5rsx5t/'
'kzX12S6Crmtl+cPeSuXO6mhQVXBAEmEn04lHTYlXqizmkEvUh/HAChNYKoxvUW'
'58LPMu1BaW0e6t9Ma1alTbc5GQppah0qYrXguU7zXFURRGI6JEsEj9qk1lTFsf'
'U1C6gns8maHHVfAZ+qHXwWoLtDiikReM+DAMKxaGOZ0Jb3AgMBAAEwDQYJKoZI'
'hvcNAQELBQADggEBAF51FZNX1EiwTX3C4yB5w56KetVXollB9WDdFcug06kdRE'
'6RefkGqK3B5c9tqyOivH61B77UN/6jFIfg62sxJ6ayycXMAdRGH2kQGoTpqs/4'
'86PjiGgFOJJYPd6tWkqwl2SxtbciEaTSnZr3jWlk6ZJNm7aJLLQV7qd7mOybwX'
'QD+vrvY5HmBz7Lrwm47IXnWb5Nrm/cgVstF94i3TLAP+2a5aUXm8SyyIArhTh7'
'e9G4mgmktvSgc1LCK9JAJ76ICaN/p0UfxEXcy3LQj32ihUbKb7dFC+FBCIJhSr'
'EMwdHX1eilAT2gAJkTmU+F/ISo95BBuBNunpwBt2Pa93T6GZ0=')
def auth_params(self, *args, **kwargs):
params = super().auth_params(*args, **kwargs)
params['prompt'] = 'login'
return params
def clean_attributes(self, attrs_in):
attr_map = {
'primarysid': 'primary_sid',
'company': 'department_name',
'email': 'email',
'winaccountname': 'username',
'group': 'ad_groups',
'unique_name': 'last_first_name',
'given_name': 'first_name',
'family_name': 'last_name',
}
# Convert attribute names to lowercase
attrs_in = {k.lower(): v for k, v in attrs_in.items()}
attrs = {}
for in_name, out_name in attr_map.items():
val = attrs_in.get(in_name, None)
if val is not None:
if out_name in ('department_name', 'email', 'username'):
val = val.lower()
attrs[out_name] = val
else:
print(in_name, 'not found in data')
attrs[out_name] = val
if 'last_first_name' in attrs:
names = attrs['last_first_name'].split(' ')
if 'first_name' not in attrs:
attrs['first_name'] = [names[0]]
if 'last_name' not in attrs:
attrs['last_name'] = [' '.join(names[1:])]
del attrs['last_first_name']
return attrs
| mit | -7,898,783,602,054,292,000 | 42.985915 | 76 | 0.639449 | false |
n0max/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/mock.py | 465 | 6715 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Mocks for testing.
"""
import Queue
import threading
from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
class _MockConnBase(object):
"""Base class of mocks for mod_python.apache.mp_conn.
This enables tests to check what is written to a (mock) mp_conn.
"""
def __init__(self):
self._write_data = []
self.remote_addr = 'fake_address'
def write(self, data):
"""Override mod_python.apache.mp_conn.write."""
self._write_data.append(data)
def written_data(self):
"""Get bytes written to this mock."""
return ''.join(self._write_data)
class MockConn(_MockConnBase):
"""Mock for mod_python.apache.mp_conn.
This enables tests to specify what should be read from a (mock) mp_conn as
well as to check what is written to it.
"""
def __init__(self, read_data):
"""Constructs an instance.
Args:
read_data: bytes that should be returned when read* methods are
called.
"""
_MockConnBase.__init__(self)
self._read_data = read_data
self._read_pos = 0
def readline(self):
"""Override mod_python.apache.mp_conn.readline."""
if self._read_pos >= len(self._read_data):
return ''
end_index = self._read_data.find('\n', self._read_pos) + 1
if not end_index:
end_index = len(self._read_data)
return self._read_up_to(end_index)
def read(self, length):
"""Override mod_python.apache.mp_conn.read."""
if self._read_pos >= len(self._read_data):
return ''
end_index = min(len(self._read_data), self._read_pos + length)
return self._read_up_to(end_index)
def _read_up_to(self, end_index):
line = self._read_data[self._read_pos:end_index]
self._read_pos = end_index
return line
class MockBlockingConn(_MockConnBase):
"""Blocking mock for mod_python.apache.mp_conn.
This enables tests to specify what should be read from a (mock) mp_conn as
well as to check what is written to it.
Callers of read* methods will block if there is no bytes available.
"""
def __init__(self):
_MockConnBase.__init__(self)
self._queue = Queue.Queue()
def readline(self):
"""Override mod_python.apache.mp_conn.readline."""
line = ''
while True:
c = self._queue.get()
line += c
if c == '\n':
return line
def read(self, length):
"""Override mod_python.apache.mp_conn.read."""
data = ''
for unused in range(length):
data += self._queue.get()
return data
def put_bytes(self, bytes):
"""Put bytes to be read from this mock.
Args:
bytes: bytes to be read.
"""
for byte in bytes:
self._queue.put(byte)
class MockTable(dict):
"""Mock table.
This mimics mod_python mp_table. Note that only the methods used by
tests are overridden.
"""
def __init__(self, copy_from={}):
if isinstance(copy_from, dict):
copy_from = copy_from.items()
for key, value in copy_from:
self.__setitem__(key, value)
def __getitem__(self, key):
return super(MockTable, self).__getitem__(key.lower())
def __setitem__(self, key, value):
super(MockTable, self).__setitem__(key.lower(), value)
def get(self, key, def_value=None):
return super(MockTable, self).get(key.lower(), def_value)
class MockRequest(object):
"""Mock request.
This mimics mod_python request.
"""
def __init__(self, uri=None, headers_in={}, connection=None, method='GET',
protocol='HTTP/1.1', is_https=False):
"""Construct an instance.
Arguments:
uri: URI of the request.
headers_in: Request headers.
connection: Connection used for the request.
method: request method.
is_https: Whether this request is over SSL.
See the document of mod_python Request for details.
"""
self.uri = uri
self.unparsed_uri = uri
self.connection = connection
self.method = method
self.protocol = protocol
self.headers_in = MockTable(headers_in)
# self.is_https_ needs to be accessible from tests. To avoid name
# conflict with self.is_https(), it is named as such.
self.is_https_ = is_https
self.ws_stream = StreamHixie75(self, True)
self.ws_close_code = None
self.ws_close_reason = None
self.ws_version = common.VERSION_HYBI00
self.ws_deflate = False
def is_https(self):
"""Return whether this request is over SSL."""
return self.is_https_
class MockDispatcher(object):
"""Mock for dispatch.Dispatcher."""
def __init__(self):
self.do_extra_handshake_called = False
def do_extra_handshake(self, conn_context):
self.do_extra_handshake_called = True
def transfer_data(self, conn_context):
pass
# vi:sts=4 sw=4 et
| mpl-2.0 | 7,290,695,741,897,704,000 | 29.384615 | 78 | 0.629039 | false |
dennisobrien/bokeh | bokeh/models/tests/test_tools.py | 3 | 1491 | from __future__ import absolute_import
import mock
from bokeh.core.validation import check_integrity
from bokeh.models.layouts import LayoutDOM
from bokeh.models.tools import Toolbar, ToolbarBox
# TODO (bev) validate entire list of props
def test_Toolbar():
tb = Toolbar()
assert tb.active_drag == 'auto'
assert tb.active_inspect == 'auto'
assert tb.active_scroll == 'auto'
assert tb.active_tap == 'auto'
assert tb.autohide is False
def test_Toolbar_with_autohide():
tb = Toolbar(autohide=True)
assert tb.active_drag == 'auto'
assert tb.active_inspect == 'auto'
assert tb.active_scroll == 'auto'
assert tb.active_tap == 'auto'
assert tb.autohide is True
#
# ToolbarBox
#
def test_toolbar_box_is_instance_of_LayoutDOM():
tb_box = ToolbarBox()
assert isinstance(tb_box, LayoutDOM)
def test_toolbar_box_properties():
tb_box = ToolbarBox()
assert tb_box.toolbar_location == "right"
@mock.patch('bokeh.io.showing._show_with_state')
def test_toolbar_box_with_no_children_does_not_raise_a_bokeh_warning(mock__show_with_state):
# This is the normal way a ToolbarBox would be instantiated for example in
# a gridplot. So we don't want to worry people with warnings. The children
# for the ToolbarBox are created on the JS side.
tb_box = ToolbarBox()
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([tb_box])
assert mock_logger.warning.call_count == 0
| bsd-3-clause | -7,677,830,760,568,657,000 | 27.673077 | 92 | 0.69886 | false |
mdanielwork/intellij-community | python/helpers/pydev/tests_pydevd_mainloop/gui-qt.py | 100 | 1074 | #!/usr/bin/env python
"""Simple Qt4 example to manually test event loop integration.
To run this:
1) Enable the PyDev GUI event loop integration for qt
2) do an execfile on this script
3) ensure you have a working GUI simultaneously with an
interactive console
Ref: Modified from http://zetcode.com/tutorials/pyqt4/firstprograms/
"""
if __name__ == '__main__':
import sys
from PyQt4 import QtGui, QtCore
class SimpleWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 200, 80)
self.setWindowTitle('Hello World')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(10, 10, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'),
self, QtCore.SLOT('close()'))
if __name__ == '__main__':
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([])
sw = SimpleWindow()
sw.show()
| apache-2.0 | -1,921,897,595,483,880,700 | 28.833333 | 68 | 0.591248 | false |
espadrine/opera | chromium/src/tools/diagnose-me.py | 50 | 3016 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import os
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
@Check("/usr/bin/ld doesn't point to gold")
def CheckLocalGold():
# Check /usr/bin/ld* symlinks.
for path in ('ld.bfd', 'ld'):
path = '/usr/bin/' + path
try:
target = os.readlink(path)
except OSError, e:
if e.errno == 2:
continue # No such file
if e.errno == 22:
continue # Not a symlink
raise
if '/usr/local/gold' in target:
return ("%s is a symlink into /usr/local/gold.\n"
"It's difficult to make a recommendation, because you\n"
"probably set this up yourself. But you should make\n"
"/usr/bin/ld be the standard linker, which you likely\n"
"renamed /usr/bin/ld.bfd or something like that.\n" % path)
return None
@Check("random ninja binaries are not in the $PATH")
def CheckPathNinja():
proc = subprocess.Popen(['which', 'ninja'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if not 'depot_tools' in stdout:
return ("The ninja binary in your path isn't from depot_tools:\n"
+ " " + stdout +
"Remove custom ninjas from your path so that the one\n"
"in depot_tools is used.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
| bsd-3-clause | 2,882,276,566,240,686,600 | 30.747368 | 79 | 0.576923 | false |
SebastianoF/LabelsManager | tests/tools/test_aux_methods_utils_rotations.py | 1 | 25701 | import os
import numpy as np
from numpy.testing import assert_array_equal, assert_raises, assert_array_almost_equal
from nilabels.tools.aux_methods.utils_rotations import get_small_orthogonal_rotation, get_roto_translation_matrix, \
basic_90_rot_ax, axial_90_rotations, flip_data, symmetrise_data, reorient_b_vect, reorient_b_vect_from_files, matrix_vector_field_product
from tests.tools.decorators_tools import create_and_erase_temporary_folder_with_a_dummy_b_vectors_list, pfo_tmp_test
# TEST aux_methods.utils_rotations : get_small_orthogonal_rotation_yaw
def test_get_small_orthogonal_rotation_yaw():
theta = np.pi / 8
expected_rot = np.array([[np.cos(theta), -np.sin(theta), 0, 0],
[np.sin(theta), np.cos(theta), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'yaw')
assert_array_equal(rot, expected_rot)
theta = - np.pi / 12
expected_rot = np.array([[np.cos(theta), -np.sin(theta), 0, 0],
[np.sin(theta), np.cos(theta), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'yaw')
assert_array_equal(rot, expected_rot)
def test_get_small_orthogonal_rotation_pitch():
theta = np.pi / 8
expected_rot = np.array([[1, 0, 0, 0],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta), 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'pitch')
assert_array_equal(rot, expected_rot)
theta = - np.pi / 6
expected_rot = np.array([[1, 0, 0, 0],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta), 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'pitch')
assert_array_equal(rot, expected_rot)
def test_get_small_orthogonal_rotation_roll():
theta = np.pi / 9
expected_rot = np.array([[np.cos(theta), 0, np.sin(theta), 0],
[0, 1, 0, 0],
[-np.sin(theta), 0, np.cos(theta), 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'roll')
assert_array_equal(rot, expected_rot)
theta = - np.pi / 11
expected_rot = np.array([[np.cos(theta), 0, np.sin(theta), 0],
[0, 1, 0, 0],
[-np.sin(theta), 0, np.cos(theta), 0],
[0, 0, 0, 1]])
rot = get_small_orthogonal_rotation(theta, 'roll')
assert_array_equal(rot, expected_rot)
def test_get_small_orthogonal_rotation_zeros_theta():
assert_array_equal(get_small_orthogonal_rotation(0, 'yaw'), np.eye(4))
assert_array_equal(get_small_orthogonal_rotation(0, 'pitch'), np.eye(4))
assert_array_equal(get_small_orthogonal_rotation(0, 'roll'), np.eye(4))
def test_get_small_orthogonal_rotation_unkonwn_axis():
with assert_raises(IOError):
get_small_orthogonal_rotation(0, 'spam')
# TEST aux_methods.utils_rotations : get_roto_translation_matrix
def test_get_roto_translation_matrix_too_small_rotation_axis():
with assert_raises(IOError):
get_roto_translation_matrix(np.pi/4, rotation_axis=np.array([0.0009, 0, 0]), translation=np.array([0, 0, 0]))
def test_get_roto_translation_matrix_get_check_shape_and_translation():
rt = get_roto_translation_matrix(np.pi/8, rotation_axis=np.array([1, 0, 0]), translation=np.array([1, 2, 3]))
assert rt.shape == (4, 4)
assert_array_equal(rt[3, :], np.array([0, 0, 0, 1]))
assert_array_equal(rt[:, 3], np.array([1, 2, 3, 1]))
def test_get_roto_translation_matrix_around_x_axis():
# standard nifti convention (RAS) x -> pitch. y -> roll, z -> yaw
theta = np.pi / 9
rot_axis = np.array([1, 0, 0])
transl = np.array([1, 2, 3])
expected_rot = np.array([[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]])
rt = get_roto_translation_matrix(theta, rot_axis, transl)
expected_rt = np.eye(4)
expected_rt[:3, :3] = expected_rot
expected_rt[:3, 3] = transl
assert_array_equal(rt, expected_rt)
def test_get_roto_translation_matrix_around_y_axis():
# standard nifti convention (RAS) x -> pitch. y -> roll, z -> yaw
theta = np.pi / 9
rot_axis = np.array([0, 1, 0])
transl = np.array([1, 2, 3])
expected_rot = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
rt = get_roto_translation_matrix(theta, rot_axis, transl)
expected_rt = np.eye(4)
expected_rt[:3, :3] = expected_rot
expected_rt[:3, 3] = transl
assert_array_equal(rt, expected_rt)
def test_get_roto_translation_matrix_around_z_axis():
# standard nifti convention (RAS) x -> pitch, y -> roll, z -> yaw.
theta = np.pi / 9
rot_axis = np.array([0, 0, 1])
transl = np.array([1, 2, 3])
expected_rot = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
rt = get_roto_translation_matrix(theta, rot_axis, transl)
expected_rt = np.eye(4)
expected_rt[:3, :3] = expected_rot
expected_rt[:3, 3] = transl
assert_array_equal(rt, expected_rt)
def test_basic_rotation_ax_simple_and_visual():
cube_id = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
# counterclockwise looking front face axis
cube_ax0 = np.array([[[3, 7, 11],
[2, 6, 10],
[1, 5, 9],
[0, 4, 8]],
[[15, 19, 23],
[14, 18, 22],
[13, 17, 21],
[12, 16, 20]]])
# clockwise looking upper face axis
cube_ax1 = np.array([[[3, 15],
[7, 19],
[11, 23]],
[[2, 14],
[6, 18],
[10, 22]],
[[1, 13],
[5, 17],
[9, 21]],
[[0, 12],
[4, 16],
[8, 20]]])
# clockwise looking right face axis
cube_ax2 = np.array([[[8, 9, 10, 11],
[20, 21, 22, 23]],
[[4, 5, 6, 7],
[16, 17, 18, 19]],
[[0, 1, 2, 3],
[12, 13, 14, 15]]])
assert_array_equal(basic_90_rot_ax(cube_id, ax=0), cube_ax0)
assert_array_equal(basic_90_rot_ax(cube_id, ax=1), cube_ax1)
assert_array_equal(basic_90_rot_ax(cube_id, ax=2), cube_ax2)
def test_axial_90_rotations_4_rotations_invariance():
cube = np.array(range(2 * 3 * 4)).reshape(2, 3, 4)
for x in range(3):
# rot = 4 rotates four times around the same axis (no changes) must give the input matrix.
assert_array_equal(axial_90_rotations(cube, rot=4, ax=x), cube)
def test_axial_90_rotations_wrong_input_dimensions():
with assert_raises(IOError):
axial_90_rotations(np.ones([5, 5, 5, 5]), rot=1, ax=2)
def test_axial_90_rotations_around_x_standard_input_data():
# input
cube = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
# around front face (0)
cube_rot_1_axis0 = np.array([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
cube_rot_2_axis0 = np.array([[[3, 2],
[1, 0]],
[[7, 6],
[5, 4]]])
cube_rot_3_axis0 = np.array([[[2, 0],
[3, 1]],
[[6, 4],
[7, 5]]])
assert_array_equal(axial_90_rotations(cube, rot=3, ax=0), cube_rot_3_axis0)
assert_array_equal(axial_90_rotations(cube, rot=2, ax=0), cube_rot_2_axis0)
assert_array_equal(axial_90_rotations(cube, rot=1, ax=0), cube_rot_1_axis0)
# around top-bottom face (1)
cube_rot_1_axis1 = np.array([[[1, 5],
[3, 7]],
[[0, 4],
[2, 6]]])
cube_rot_2_axis1 = np.array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
cube_rot_3_axis1 = np.array([[[4, 0],
[6, 2]],
[[5, 1],
[7, 3]]])
assert_array_equal(axial_90_rotations(cube, rot=1, ax=1), cube_rot_1_axis1)
assert_array_equal(axial_90_rotations(cube, rot=2, ax=1), cube_rot_2_axis1)
assert_array_equal(axial_90_rotations(cube, rot=3, ax=1), cube_rot_3_axis1)
# around front face (2)
cube_rot_1_axis2 = np.array([[[2, 3],
[6, 7]],
[[0, 1],
[4, 5]]])
cube_rot_2_axis2 = np.array([[[6, 7],
[4, 5]],
[[2, 3],
[0, 1]]])
cube_rot_3_axis2 = np.array([[[4, 5],
[0, 1]],
[[6, 7],
[2, 3]]])
assert_array_equal(axial_90_rotations(cube, rot=1, ax=2), cube_rot_1_axis2)
assert_array_equal(axial_90_rotations(cube, rot=2, ax=2), cube_rot_2_axis2)
assert_array_equal(axial_90_rotations(cube, rot=3, ax=2), cube_rot_3_axis2)
def test_flip_data():
cube_id = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
cube_flip_x = np.array([[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]])
cube_flip_y = np.array([[[3, 2, 1, 0],
[7, 6, 5, 4],
[11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]])
cube_flip_z = np.array([[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]])
assert_array_equal(flip_data(cube_id, axis_direction='x'), cube_flip_x)
assert_array_equal(flip_data(cube_id, axis_direction='y'), cube_flip_y)
assert_array_equal(flip_data(cube_id, axis_direction='z'), cube_flip_z)
def test_flip_data_error_input_direction():
in_data = np.zeros([10, 10, 10])
with assert_raises(IOError):
flip_data(in_data, axis_direction='s')
def test_flip_data_error_dimension():
in_data = np.zeros([10, 10, 10, 10])
with assert_raises(IOError):
flip_data(in_data, axis_direction='x')
def test_symmetrise_data_x_axis():
cube_id = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]]])
cube_sym_x2_be_T = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[20, 21, 22, 23],
[16, 17, 18, 19]]])
cube_sym_x2_ab_T = np.array([[[12, 13, 14, 15],
[8, 9, 10, 11],
[8, 9, 10, 11],
[12, 13, 14, 15]],
[[28, 29, 30, 31],
[24, 25, 26, 27],
[24, 25, 26, 27],
[28, 29, 30, 31]]])
cube_sym_x3_be_F = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[24, 25, 26, 27],
[20, 21, 22, 23],
[16, 17, 18, 19]]])
cube_sym_x3_be_T = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[8, 9, 10, 11]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[24, 25, 26, 27]]])
assert_array_equal(symmetrise_data(cube_id, axis_direction='x', plane_intercept=2, side_to_copy='below',
keep_in_data_dimensions_boundaries=True), cube_sym_x2_be_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='x', plane_intercept=2, side_to_copy='above',
keep_in_data_dimensions_boundaries=True), cube_sym_x2_ab_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='x', plane_intercept=3, side_to_copy='below',
keep_in_data_dimensions_boundaries=False), cube_sym_x3_be_F)
assert_array_equal(symmetrise_data(cube_id, axis_direction='x', plane_intercept=3, side_to_copy='below',
keep_in_data_dimensions_boundaries=True), cube_sym_x3_be_T)
def test_symmetrise_data_y_axis():
cube_id = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]]])
cube_sym_y2_be_T = np.array([[[0, 1, 1, 0],
[4, 5, 5, 4],
[8, 9, 9, 8],
[12, 13, 13, 12]],
[[16, 17, 17, 16],
[20, 21, 21, 20],
[24, 25, 25, 24],
[28, 29, 29, 28]]])
cube_sym_y2_ab_T = np.array([[[3, 2, 2, 3],
[7, 6, 6, 7],
[11, 10, 10, 11],
[15, 14, 14, 15]],
[[19, 18, 18, 19],
[23, 22, 22, 23],
[27, 26, 26, 27],
[31, 30, 30, 31]]])
cube_sym_y3_be_F = np.array([[[0, 1, 2, 2, 1, 0],
[4, 5, 6, 6, 5, 4],
[8, 9, 10, 10, 9, 8],
[12, 13, 14, 14, 13, 12]],
[[16, 17, 18, 18, 17, 16],
[20, 21, 22, 22, 21, 20],
[24, 25, 26, 26, 25, 24],
[28, 29, 30, 30, 29, 28]]])
cube_sym_y3_be_T = np.array([[[0, 1, 2, 2],
[4, 5, 6, 6],
[8, 9, 10, 10],
[12, 13, 14, 14]],
[[16, 17, 18, 18],
[20, 21, 22, 22],
[24, 25, 26, 26],
[28, 29, 30, 30]]])
assert_array_equal(symmetrise_data(cube_id, axis_direction='y', plane_intercept=2, side_to_copy='below',
keep_in_data_dimensions_boundaries=True), cube_sym_y2_be_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='y', plane_intercept=2, side_to_copy='above',
keep_in_data_dimensions_boundaries=True), cube_sym_y2_ab_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='y', plane_intercept=3, side_to_copy='below',
keep_in_data_dimensions_boundaries=False), cube_sym_y3_be_F)
assert_array_equal(symmetrise_data(cube_id, axis_direction='y', plane_intercept=3, side_to_copy='below',
keep_in_data_dimensions_boundaries=True), cube_sym_y3_be_T)
def test_symmetrise_data_z_axis():
cube_id = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[32, 33, 34, 35],
[36, 37, 38, 39],
[40, 41, 42, 43],
[44, 45, 46, 47]]])
cube_sym_z2_be_T = np.array([[[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]]])
cube_sym_z1_ab_T = np.array([[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[32, 33, 34, 35],
[36, 37, 38, 39],
[40, 41, 42, 43],
[44, 45, 46, 47]]])
cube_sym_z1_ab_F = np.array([[[32, 33, 34, 35],
[36, 37, 38, 39],
[40, 41, 42, 43],
[44, 45, 46, 47]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]],
[[32, 33, 34, 35],
[36, 37, 38, 39],
[40, 41, 42, 43],
[44, 45, 46, 47]]])
assert_array_equal(symmetrise_data(cube_id, axis_direction='z', plane_intercept=2, side_to_copy='below',
keep_in_data_dimensions_boundaries=True), cube_sym_z2_be_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='z', plane_intercept=1, side_to_copy='above',
keep_in_data_dimensions_boundaries=True), cube_sym_z1_ab_T)
assert_array_equal(symmetrise_data(cube_id, axis_direction='z', plane_intercept=1, side_to_copy='above',
keep_in_data_dimensions_boundaries=False), cube_sym_z1_ab_F)
def test_symmetrise_data_error_input_ndim():
with assert_raises(IOError):
symmetrise_data(np.ones([5, 5, 5, 5]))
def test_symmetrise_data_error_input_side_to_copy():
with assert_raises(IOError):
symmetrise_data(np.ones([5, 5, 5]), side_to_copy='spam')
def test_symmetrise_data_error_input_axis_direction():
with assert_raises(IOError):
symmetrise_data(np.ones([5, 5, 5]), axis_direction='s')
def test_reorient_b_vect():
n = 10 # 10 random b-vectors
b_vects = np.random.randn(n, 3)
transformation = np.random.randn(3, 3)
expected_answer = np.zeros_like(b_vects)
for r in range(n):
expected_answer[r, :] = transformation.dot(b_vects[r, :])
assert_array_almost_equal(reorient_b_vect(b_vects, transformation), expected_answer)
@create_and_erase_temporary_folder_with_a_dummy_b_vectors_list
def test_reorient_b_vect_from_files():
in_b_vects = np.loadtxt(os.path.join(pfo_tmp_test, 'b_vects_file.txt'))
transformation = np.random.randn(3, 3)
expected_saved_answer = np.zeros_like(in_b_vects)
for r in range(in_b_vects.shape[0]):
expected_saved_answer[r, :] = transformation.dot(in_b_vects[r, :])
reorient_b_vect_from_files(os.path.join(pfo_tmp_test, 'b_vects_file.txt'),
os.path.join(pfo_tmp_test, 'b_vects_file_reoriented.txt'),
transformation)
loaded_answer = np.loadtxt(os.path.join(pfo_tmp_test, 'b_vects_file_reoriented.txt'))
assert_array_almost_equal(loaded_answer, expected_saved_answer)
def test_matrix_vector_fields_product():
j_input = np.random.randn(10, 15, 4)
v_input = np.random.randn(10, 15, 2)
d = v_input.shape[-1]
vol = list(v_input.shape[:-1])
v = np.tile(v_input, [1] * d + [d])
j_times_v = np.multiply(j_input, v)
expected_answer = np.sum(j_times_v.reshape(vol + [d, d]), axis=d + 1).reshape(vol + [d])
obtained_answer = matrix_vector_field_product(j_input, v_input)
assert_array_almost_equal(expected_answer, obtained_answer)
def test_matrix_vector_fields_product_3d():
j_input = np.random.randn(10, 15, 18, 9)
v_input = np.random.randn(10, 15, 18, 3)
d = v_input.shape[-1]
vol = list(v_input.shape[:-1])
v = np.tile(v_input, [1] * d + [d])
j_times_v = np.multiply(j_input, v)
expected_answer = np.sum(j_times_v.reshape(vol + [d, d]), axis=d + 1).reshape(vol + [d])
obtained_answer = matrix_vector_field_product(j_input, v_input)
assert_array_almost_equal(expected_answer, obtained_answer)
def test_matrix_vector_fields_product_3d_bad_input():
j_input = np.random.randn(10, 15, 3, 9)
v_input = np.random.randn(10, 15, 3)
with assert_raises(IOError):
matrix_vector_field_product(j_input, v_input)
j_input = np.random.randn(10, 15, 9)
v_input = np.random.randn(10, 14, 3)
with assert_raises(IOError):
matrix_vector_field_product(j_input, v_input)
if __name__ == '__main__':
test_get_small_orthogonal_rotation_yaw()
test_get_small_orthogonal_rotation_pitch()
test_get_small_orthogonal_rotation_roll()
test_get_small_orthogonal_rotation_zeros_theta()
test_get_small_orthogonal_rotation_unkonwn_axis()
test_get_roto_translation_matrix_too_small_rotation_axis()
test_get_roto_translation_matrix_get_check_shape_and_translation()
test_get_roto_translation_matrix_around_x_axis()
test_get_roto_translation_matrix_around_y_axis()
test_get_roto_translation_matrix_around_z_axis()
test_basic_rotation_ax_simple_and_visual()
test_axial_90_rotations_4_rotations_invariance()
test_axial_90_rotations_around_x_standard_input_data()
test_flip_data()
test_flip_data_error_input_direction()
test_flip_data_error_dimension()
test_symmetrise_data_x_axis()
test_symmetrise_data_y_axis()
test_symmetrise_data_z_axis()
test_symmetrise_data_error_input_ndim()
test_symmetrise_data_error_input_side_to_copy()
test_symmetrise_data_error_input_axis_direction()
test_reorient_b_vect()
test_reorient_b_vect_from_files()
test_matrix_vector_fields_product()
test_matrix_vector_fields_product_3d()
test_matrix_vector_fields_product_3d_bad_input()
| mit | -8,558,416,037,143,464,000 | 39.537855 | 141 | 0.423213 | false |
andmos/ansible | test/units/modules/source_control/gitlab.py | 21 | 29411 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import sys
from httmock import response # noqa
from httmock import urlmatch # noqa
from units.compat import unittest
from gitlab import Gitlab
class FakeAnsibleModule(object):
def __init__(self):
self.check_mode = False
def fail_json(self, **args):
pass
def exit_json(self, **args):
pass
class GitlabModuleTestCase(unittest.TestCase):
def setUp(self):
unitest_python_version_check_requirement(self)
self.mock_module = FakeAnsibleModule()
self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4)
# Python 2.7+ is needed for python-gitlab
GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
# Verify that the current Python version is at least GITLAB_MINIMUM_PYTHON_VERSION
def python_version_match_requirement():
return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
# Skip a unittest test case if the Python version doesn't match the requirement
def unitest_python_version_check_requirement(unittest_testcase):
if not python_version_match_requirement():
        unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ".".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
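# Illustrative usage (a hedged sketch, not exercised by this module itself):
# concrete module tests are expected to subclass GitlabModuleTestCase and
# activate one of the mock responders defined below through httmock. The test
# class and method names here are hypothetical:
#
#     from httmock import HTTMock
#
#     class TestGitlabUserLookup(GitlabModuleTestCase):
#         def test_find_user(self):
#             with HTTMock(resp_find_user):
#                 users = self.gitlab_instance.users.list(username="john_smith")
#                 self.assertEqual(users[0].id, 1)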
'''
USER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
def resp_find_user(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
'"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
'"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
'"web_url": "http://localhost:3000/jack_smith"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
def resp_create_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
'"bio": null, "location": null, "public_email": "[email protected]", "skype": "",'
'"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
'"state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith",'
'"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
'"public_email": "[email protected]", "skype": "", "linkedin": "",'
'"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
'''
USER SSHKEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
def resp_get_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
'"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
def resp_create_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "title": "Private key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
'2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
'"created_at": "2014-08-01T14:47:39.080Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
'''
GROUP API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
def resp_find_group(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "bar-foo",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_missing_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_subgroup(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
'"file_template_project_id": 1, "parent_id": 1}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="delete")
def resp_delete_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
GROUP MEMBER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
def resp_get_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
def resp_find_member(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
'"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
def resp_add_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
def resp_update_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 10}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
'''
DEPLOY KEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
def resp_find_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T11:12:29Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
def resp_get_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
def resp_create_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
def resp_delete_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
PROJECT API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
def resp_find_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
def resp_get_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
def resp_get_project_by_name(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
def resp_find_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
def resp_get_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
def resp_create_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "[email protected]:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
def resp_delete_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
HOOK API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
def resp_find_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
def resp_get_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
def resp_create_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
def resp_delete_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
RUNNER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/all", method="get")
def resp_find_runners(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"},{"active": true,'
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
'"is_shared": false,"name": null,"online": false,"status": "offline"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="get")
def resp_get_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="post")
def resp_create_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="delete")
def resp_delete_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
| gpl-3.0 | 4,768,407,525,212,499,000 | 51.239787 | 137 | 0.621468 | false |
vinicius-carvalho/fatec-prog-script | backend/appengine/routes/comportamentos/rest.py | 1 | 1233 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from comportamento_app import facade
def index():
cmd = facade.list_comportamentos_cmd()
comportamento_list = cmd()
    short_form = facade.comportamento_short_form()
comportamento_short = [short_form.fill_with_model(m) for m in comportamento_list]
return JsonResponse(comportamento_short)
def save(**comportamento_properties):
cmd = facade.save_comportamento_cmd(**comportamento_properties)
return _save_or_update_json_response(cmd)
def update(comportamento_id, **comportamento_properties):
cmd = facade.update_comportamento_cmd(comportamento_id, **comportamento_properties)
return _save_or_update_json_response(cmd)
def delete(comportamento_id):
facade.delete_comportamento_cmd(comportamento_id)()
def _save_or_update_json_response(cmd):
try:
comportamento = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
    short_form = facade.comportamento_short_form()
return JsonResponse(short_form.fill_with_model(comportamento))
| mit | 3,348,626,290,949,927,400 | 32.324324 | 87 | 0.750203 | false |
mexeniz/django-oscar | src/oscar/apps/dashboard/shipping/views.py | 23 | 5163 | from django import shortcuts
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.views import generic
from oscar.core.loading import get_model, get_classes
WeightBandForm, WeightBasedForm = get_classes(
'dashboard.shipping.forms', ['WeightBandForm', 'WeightBasedForm'])
WeightBased = get_model('shipping', 'WeightBased')
WeightBand = get_model('shipping', 'WeightBand')
class WeightBasedListView(generic.ListView):
model = WeightBased
template_name = "dashboard/shipping/weight_based_list.html"
context_object_name = "methods"
class WeightBasedCreateView(generic.CreateView):
model = WeightBased
form_class = WeightBasedForm
template_name = "dashboard/shipping/weight_based_form.html"
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/method_created.html',
{'method': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-detail',
kwargs={'pk': self.object.pk})
class WeightBasedDetailView(generic.CreateView):
model = WeightBand
form_class = WeightBandForm
template_name = "dashboard/shipping/weight_based_detail.html"
def dispatch(self, request, *args, **kwargs):
self.method = shortcuts.get_object_or_404(
WeightBased, pk=kwargs['pk'])
return super(WeightBasedDetailView, self).dispatch(
request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
kwargs = super(WeightBasedDetailView, self).get_form_kwargs(**kwargs)
kwargs['method'] = self.method
return kwargs
def get_context_data(self, **kwargs):
ctx = super(WeightBasedDetailView, self).get_context_data(**kwargs)
ctx['method'] = self.method
return ctx
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/band_created.html',
{'band': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-detail',
kwargs={'pk': self.method.pk})
class WeightBasedUpdateView(generic.UpdateView):
model = WeightBased
form_class = WeightBasedForm
template_name = "dashboard/shipping/weight_based_form.html"
context_object_name = "method"
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/method_updated.html',
{'method': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-detail',
kwargs={'pk': self.object.pk})
class WeightBandUpdateView(generic.UpdateView):
model = WeightBand
form_class = WeightBandForm
template_name = "dashboard/shipping/weight_band_form.html"
context_object_name = "band"
def dispatch(self, request, *args, **kwargs):
self.method = shortcuts.get_object_or_404(
WeightBased, pk=kwargs['method_pk'])
return super(WeightBandUpdateView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self):
return self.method.bands.all()
def get_form_kwargs(self, **kwargs):
kwargs = super(WeightBandUpdateView, self).get_form_kwargs(**kwargs)
kwargs['method'] = self.method
return kwargs
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/band_updated.html',
{'band': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-detail',
kwargs={'pk': self.method.pk})
class WeightBandDeleteView(generic.DeleteView):
    model = WeightBand
template_name = "dashboard/shipping/weight_band_delete.html"
context_object_name = "band"
def dispatch(self, request, *args, **kwargs):
self.method = shortcuts.get_object_or_404(
WeightBased, pk=kwargs['method_pk'])
return super(WeightBandDeleteView, self).dispatch(
request, *args, **kwargs)
def get_queryset(self):
return self.method.bands.all()
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/band_deleted.html',
{'band': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-detail',
kwargs={'pk': self.method.pk})
class WeightBasedDeleteView(generic.DeleteView):
model = WeightBased
template_name = "dashboard/shipping/weight_based_delete.html"
context_object_name = "method"
def get_success_url(self):
msg = render_to_string(
'dashboard/shipping/messages/method_deleted.html',
{'method': self.object})
messages.success(self.request, msg, extra_tags='safe noicon')
return reverse('dashboard:shipping-method-list')
| bsd-3-clause | -4,635,080,362,376,403,000 | 35.359155 | 77 | 0.65737 | false |
olesk75/SparkLED | Tools/logserver.py | 1 | 3236 | #!/usr/bin/env python3
""" Python3 program which listens to sensor information over the network as port 2208.
The purpose of this server is to collect data and serve queries from other clients.
In the SuperLED project, this acts as a "syslog" server, listening for and recording
data from sensors (access control, temperature, lights, heating etc.). SuperLED.py will
query this server to see if there is anything it should put on the LED display. An
example would be the image of a bell if someone rings the doorbell.
Message format for sensors and others who want to log data:
sensorname:level:value (level is LOGLEVEL, normally info)
Message format for clients who want to read data:
req:sensorname <- get last reading from sensor
Message format for clients who want to check for priority messages:
pri <- pop last priority message from priority stack
"""
import logging
import socket
import sys
import signal
HOST = '' # Symbolic name meaning all available interfaces
PORT = 2208 # Arbitrary non-privileged port
MAX_CONN = 10 # Maximum simultaneous connections
priority = []
# noinspection PyUnusedLocal,PyUnusedLocal,PyShadowingNames
def signal_handler(signal, frame):
print('\n- Interrupted manually, aborting')
logger.critical('Abort received, shutting down')
    server.shutdown(socket.SHUT_RDWR)  # socket.shutdown() requires a 'how' argument
    server.close()
    sys.exit(1)
if __name__ == "__main__": # Making sure we don't have problems if importing from this file as a module
logging.basicConfig(level=logging.DEBUG, # The lowest log level that will be printed to STDOUT (DEBUG < INFO <WARN < ERROR < CRITICAL)
format='%(asctime)s:%(message)s',
datefmt='%d%m%y:%H%M%S',
filename='sensors.log')
#logger = logging.getLogger(__name__)
logger = logging.getLogger('msg_logger')
    signal.signal(signal.SIGINT, signal_handler)  # Set up the signal handler to arrange a tidy exit if manually interrupted
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
try:
server.bind((HOST, PORT))
except socket.error:
print('Bind failed')
sys.exit()
print('Socket bind complete')
server.listen(MAX_CONN)
print('Socket now listening on port ' + str(PORT))
while True:
#wait to accept a connection - blocking call
conn, addr = server.accept()
#logger.debug('Connected with ' + addr[0] + ':' + str(addr[1]))
data, client_address = conn.recvfrom(1024)
data = data.decode() # Converting from bytearray to string
data = data.split(':') # Converting string to list of strings, split by colon
        if data[0] == 'req':  # Information request
            sensor = data[1]
            # NOTE: the lookup of the sensor's last reading is not implemented
            # here yet; we still answer so that 'reply' is always defined below.
            reply = 'nack ==> ' + sensor
        else:  # Data logging
            [sensor, level, value] = data
            logger.info(addr[0] + ':' + sensor + ':' + level + ':' + value)  # We log the info
            reply = 'ack ==> ' + sensor + ':' + level + ':' + value  # We confirm the message, allowing the client to resend if it doesn't agree
            if level == 'CRIT':
                priority.append(sensor + ':' + level + ':' + value)
        conn.send(reply.encode(encoding='utf8'))  # send() needs bytes, so encode the (unicode) string
conn.close()
| gpl-3.0 | -4,166,800,974,136,641,500 | 38.463415 | 145 | 0.682015 | false |
zrax/moul-scripts | Python/system/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1026',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 | -6,458,926,565,253,153,000 | 42.547231 | 118 | 0.529209 | false |
eramirem/astroML | book_figures/chapter8/fig_huber_loss.py | 3 | 2933 | """
Huber Loss Function
-------------------
Figure 8.8
An example of fitting a simple linear model to data which includes outliers
(data is from table 1 of Hogg et al 2010). A comparison of linear regression
using the squared-loss function (equivalent to ordinary least-squares
regression) and the Huber loss function, with c = 1 (i.e., beyond 1 standard
deviation, the loss becomes linear).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
from astroML.datasets import fetch_hogg2010test
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Get data: this includes outliers
data = fetch_hogg2010test()
x = data['x']
y = data['y']
dy = data['sigma_y']
# Define the standard squared-loss function
def squared_loss(m, b, x, y, dy):
y_fit = m * x + b
return np.sum(((y - y_fit) / dy) ** 2, -1)
# Define the log-likelihood via the Huber loss function
def huber_loss(m, b, x, y, dy, c=2):
y_fit = m * x + b
t = abs((y - y_fit) / dy)
flag = t > c
return np.sum((~flag) * (0.5 * t ** 2) - (flag) * c * (0.5 * c - t), -1)
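# For reference (a sketch of the standard form, consistent with huber_loss above):
#     rho_c(t) = 0.5 * t**2           for |t| <= c
#     rho_c(t) = c * (|t| - 0.5 * c)  for |t| >  c
# with t = (y - y_fit) / dy, so residuals beyond c standard deviations only
# contribute linearly to the total loss.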
f_squared = lambda beta: squared_loss(beta[0], beta[1], x=x, y=y, dy=dy)
f_huber = lambda beta: huber_loss(beta[0], beta[1], x=x, y=y, dy=dy, c=1)
#------------------------------------------------------------
# compute the maximum likelihood using the huber loss
beta0 = (2, 30)
beta_squared = optimize.fmin(f_squared, beta0)
beta_huber = optimize.fmin(f_huber, beta0)
print(beta_squared)
print(beta_huber)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
x_fit = np.linspace(0, 350, 10)
ax.plot(x_fit, beta_squared[0] * x_fit + beta_squared[1], '--k',
label="squared loss:\n $y=%.2fx + %.1f$" % tuple(beta_squared))
ax.plot(x_fit, beta_huber[0] * x_fit + beta_huber[1], '-k',
label="Huber loss:\n $y=%.2fx + %.1f$" % tuple(beta_huber))
ax.legend(loc=4)
ax.errorbar(x, y, dy, fmt='.k', lw=1, ecolor='gray')
ax.set_xlim(0, 350)
ax.set_ylim(100, 700)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.show()
| bsd-2-clause | 4,189,784,919,215,258,000 | 32.329545 | 79 | 0.615752 | false |
yvaucher/stock-logistics-transport | transport_information/__openerp__.py | 3 | 1365 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2014 Camptocamp SA
# Author: Leonardo Pistone
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Transport Information",
"summary": "Transport Information",
"version": "0.1",
"author": "Camptocamp,Odoo Community Association (OCA)",
"category": "Purchase Management",
"license": "AGPL-3",
'complexity': "easy",
"depends": ["purchase",
],
"data": ["view/transport_mode.xml",
"view/transport_vehicle.xml",
"security/ir.model.access.csv",
],
"installable": True,
}
| agpl-3.0 | 3,829,911,369,465,723,400 | 36.916667 | 78 | 0.597802 | false |
pkainz/pylearn2 | pylearn2/models/model.py | 44 | 24339 | """Generic "model" class."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from collections import defaultdict
from theano.compat.six.moves import zip as izip_no_length_check
import numpy as np
from theano.compat import six
from theano import tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.model_extensions.model_extension import ModelExtension
from pylearn2.space import NullSpace
from pylearn2.utils import function
from pylearn2.utils import safe_zip
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.track_version import MetaLibVersion
class Model(object):
"""
A class representing a model with learnable parameters.
Parameters
----------
extensions : list of ModelExtension
Plugins to extend the model's functionality
"""
__metaclass__ = MetaLibVersion
_test_batch_size = 2
def __init__(self, extensions=None):
super(Model, self).__init__()
if extensions is None:
extensions = []
else:
assert isinstance(extensions, list)
assert all(isinstance(extension, ModelExtension) for extension in
extensions)
self.__dict__.update(locals())
del self.self
self._disallow_censor_updates()
self.names_to_del = set()
def _disallow_censor_updates(self):
"""
Don't let subclasses use censor_updates.
"""
if self._overrides_censor_updates():
raise TypeError(str(type(self)) + " overrides "
"Model.censor_updates, which is no longer in use. "
"Change this to _modify_updates. This check may "
"quit being performed after 2015-05-13.")
def _ensure_extensions(self):
"""
Makes sure the model has an "extensions" field.
"""
if not hasattr(self, "extensions"):
raise TypeError("The " + str(type(self)) + " Model subclass "
"is required to call the Model superclass "
"constructor but does not.")
self.extensions = []
def __setstate__(self, d):
"""
An implementation of __setstate__ that patches old pickle files.
"""
self._disallow_censor_updates()
self.__dict__.update(d)
# Patch old pickle files
if 'extensions' not in d:
self.extensions = []
def get_default_cost(self):
"""
Returns the default cost to use with this model.
Returns
-------
default_cost : Cost
The default cost to use with this model.
"""
raise NotImplementedError(str(type(self)) +
" does not implement get_default_cost.")
def train_all(self, dataset):
"""
If implemented, performs one epoch of training.
Parameters
----------
dataset : pylearn2.datasets.dataset.Dataset
Dataset object to draw training data from
Notes
-----
This method is useful
        for models with highly specialized training algorithms for which it
does not make much sense to factor the training code into a separate
class. It is also useful for implementors that want to make their model
trainable without enforcing compatibility with pylearn2
TrainingAlgorithms.
"""
raise NotImplementedError(str(type(self)) +
" does not implement train_all.")
def continue_learning(self):
"""
If train_all is used to train the model, this method is used to
determine when the training process has converged. This method is
called after the monitor has been run on the latest parameters.
Returns
-------
rval : bool
True if training should continue
"""
raise NotImplementedError(str(type(self)) +
" does not implement continue_learning.")
def train_batch(self, dataset, batch_size):
"""
If implemented, performs an update on a single minibatch.
Parameters
----------
dataset: pylearn2.datasets.dataset.Dataset
The object to draw training data from.
batch_size: int
Size of the minibatch to draw from dataset.
Returns
-------
rval : bool
True if the method should be called again for another update.
False if convergence has been reached.
"""
raise NotImplementedError()
def get_weights_view_shape(self):
"""
Returns the shape `PatchViewer` should use to display the
weights.
Returns
-------
shape : tuple
A tuple containing two ints. These are used as the
`grid_shape` argument to `PatchViewer` when
displaying the weights of this model.
Notes
-----
This can be useful when there is some geometric
significance to the order of your weight
vectors. For example, the `Maxout` model makes sure that all of
the filters for the same hidden unit appear on the same row
of the display.
"""
raise NotImplementedError(str(type(self)) + " does not implement "
"get_weights_view_shape (perhaps by design)")
def get_monitoring_channels(self, data):
"""
Get monitoring channels for this model.
Parameters
----------
data : tensor_like, or (possibly nested) tuple of tensor_likes,
This is data on which the monitoring quantities will be
calculated (e.g., a validation set). See
`self.get_monitoring_data_specs()`.
Returns
-------
channels : OrderedDict
A dictionary with strings as keys, mapping channel names to
symbolic values that depend on the variables in `data`.
Notes
-----
You can make any channel names you want, just try to make sure they
won't collide with names made by the training Cost, etc. Anything you
think is worth monitoring during training can be added here. You
probably want to control which channels get added with some config
option for your model.
"""
space, source = self.get_monitoring_data_specs()
space.validate(data)
return OrderedDict()
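    # Illustrative sketch (not library code): a subclass wanting to monitor the
    # norm of a hypothetical weight matrix W could instead end with
    #
    #     rval = OrderedDict()
    #     rval['W_norm'] = T.sqrt(T.sqr(self.W).sum())
    #     return rval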
def get_monitoring_data_specs(self):
"""
Get the data_specs describing the data for get_monitoring_channels.
This implementation returns an empty data_specs, appropriate for
when no monitoring channels are defined, or when none of the channels
actually need data (for instance, if they only monitor functions
of the model's parameters).
Returns
-------
data_specs : TODO WRITEME
TODO WRITEME
"""
return (NullSpace(), '')
def set_batch_size(self, batch_size):
"""
Sets the batch size used by the model.
Parameters
----------
batch_size : int
If None, allows the model to use any batch size.
"""
pass
def get_weights(self):
"""
Returns the weights (of the first layer if more than one layer is
present).
Returns
-------
weights : ndarray
Returns any matrix that is analogous to the weights of the first
layer of an MLP, such as the dictionary of a sparse coding model.
This implementation raises NotImplementedError. For models where
this method is not conceptually applicable, do not override it.
Format should be compatible with the return value of
self.get_weights_format.
"""
raise NotImplementedError(str(type(self)) + " does not implement "
"get_weights (perhaps by design)")
def get_weights_format(self):
"""
Returns a description of how to interpret the return value of
`get_weights`.
Returns
-------
format : tuple
Either ('v', 'h') or ('h', 'v'). ('v', 'h') means self.get_weights
returns a matrix of shape (num visible units, num hidden units),
while ('h', 'v') means it returns the transpose of this.
"""
return ('v', 'h')
def get_weights_topo(self):
"""
Returns a topological view of the weights.
Returns
-------
weights : ndarray
Same as the return value of `get_weights` but formatted as a 4D
tensor with the axes being (hidden units, rows, columns,
channels). Only applicable for models where the weights can be
viewed as 2D-multichannel, and the number of channels is either
1 or 3 (because they will be visualized as grayscale or RGB color).
"""
raise NotImplementedError(str(type(self)) + " does not implement "
"get_weights_topo (perhaps by design)")
def score(self, V):
"""
Compute a "score function" for this model, if this model has
probabilistic semantics.
Parameters
----------
V : tensor_like, 2-dimensional
A batch of i.i.d. examples with examples indexed along the
first axis and features along the second. This is data on which
the monitoring quantities will be calculated (e.g., a validation
set).
Returns
-------
score : tensor_like
The gradient of the negative log probability of the model
            on the given data.
Notes
-----
If the model implements a probability distribution on R^n,
this method should return the gradient of the log probability
of the batch with respect to V, or raise an exception explaining
why this is not possible.
"""
return T.grad(-self.free_energy(V).sum(), V)
def get_lr_scalers(self):
"""
Specify how to rescale the learning rate on each parameter.
Returns
-------
lr_scalers : OrderedDict
A dictionary mapping the parameters of the model to floats. The
learning rate will be multiplied by the float for each parameter.
If a parameter does not appear in the dictionary, it will use
the global learning rate with no scaling.
"""
return OrderedDict()
def _overrides_censor_updates(self):
"""
Returns true if the model overrides censor_updates.
(It shouldn't do so because it's deprecated, and we have
to take special action to handle this case)
"""
return type(self).censor_updates != Model.censor_updates
def censor_updates(self, updates):
"""
Deprecated method. Callers should call modify_updates instead.
Subclasses should override _modify_updates instead.
This method may be removed on or after 2015-05-25.
Parameters
----------
updates : dict
A dictionary mapping shared variables to symbolic values they
will be updated to.
"""
raise TypeError("Model.censor_updates has been replaced by "
"Model.modify_updates.")
def modify_updates(self, updates):
""""
Modifies the parameters before a learning update is applied. Behavior
is defined by subclass's implementation of _modify_updates and any
ModelExtension's implementation of post_modify_updates.
Parameters
----------
updates : dict
A dictionary mapping shared variables to symbolic values they
will be updated to
Notes
-----
For example, if a given parameter is not meant to be learned, a
subclass or extension
should remove it from the dictionary. If a parameter has a restricted
range, e.g.. if it is the precision of a normal distribution,
a subclass or extension should clip its update to that range. If a
parameter
has any other special properties, its updates should be modified
to respect that here, e.g. a matrix that must be orthogonal should
have its update value modified to be orthogonal here.
This is the main mechanism used to make sure that generic training
algorithms such as those found in pylearn2.training_algorithms
respect the specific properties of the models passed to them.
"""
self._modify_updates(updates)
self._ensure_extensions()
for extension in self.extensions:
extension.post_modify_updates(updates, self)
def _modify_updates(self, updates):
"""
Subclasses may override this method to add functionality to
modify_updates.
Parameters
----------
updates : dict
A dictionary mapping shared variables to symbolic values they
will be updated to.
"""
# Catch classes that try to override the old method.
# This check may be removed after 2015-05-13.
self._disallow_censor_updates()
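    # Illustrative sketch (an assumption, not part of this base class): a
    # subclass with a precision parameter 'beta' that must stay positive could
    # override _modify_updates as
    #
    #     def _modify_updates(self, updates):
    #         if self.beta in updates:
    #             updates[self.beta] = T.clip(updates[self.beta], 1e-6, 1e6)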
def get_input_space(self):
"""
Returns an instance of pylearn2.space.Space describing the format of
the vector space that the model operates on (this is a generalization
of get_input_dim)
"""
return self.input_space
def get_output_space(self):
"""
Returns an instance of pylearn2.space.Space describing the format of
the vector space that the model outputs (this is a generalization
of get_output_dim)
"""
return self.output_space
def get_target_space(self):
"""
Returns an instance of pylearn2.space.Space describing the format of
that the targets should be in, which may be different from the output
space. Calls get_output_space() unless _target_space exists.
"""
if hasattr(self, '_target_space'):
return self._target_space
else:
return self.get_output_space()
def get_input_source(self):
"""
Returns a string, stating the source for the input. By default the
model expects only one input source, which is called 'features'.
"""
if hasattr(self, 'input_source'):
return self.input_source
else:
return 'features'
def get_target_source(self):
"""
Returns a string, stating the source for the output. By default the
model expects only one output source, which is called 'targets'.
"""
if hasattr(self, 'target_source'):
return self.target_source
else:
return 'targets'
def free_energy(self, V):
"""
Compute the free energy of data examples, if this model has
probabilistic semantics.
Parameters
----------
V : tensor_like, 2-dimensional
A batch of i.i.d. examples with examples indexed along the
first axis and features along the second. This is data on which
the monitoring quantities will be calculated (e.g., a validation
set).
Returns
-------
free_energy : tensor, 1-dimensional
A (symbolic) vector of free energies for each data example in
`V`, i.e. `free_energy[i] = F(V[i])`.
"""
raise NotImplementedError()
def get_params(self):
"""
Returns the parameters that define the model.
Returns
-------
params : list
A list of (Theano shared variable) parameters of the model.
Notes
-----
By default, this returns a copy of the _params attribute, which
individual models can simply fill with the list of model parameters.
Alternatively, models may override `get_params`, so this should
be considered the public interface to model parameters -- directly
accessing or modifying _params is at-your-own-risk, as it may
or may not exist.
This is the main mechanism by which generic training algorithms
like SGD know which values to update, however, even model
parameters that should not be learned ought to be included here,
so that the model's parameter set is more predictable.
Parameters may be included here but held constant during
learning via the `modify_updates` method.
"""
return list(self._params)
def get_param_values(self, borrow=False):
"""
Returns numerical values for the parameters that define the model.
Parameters
----------
borrow : bool, optional
Flag to be passed to the `.get_value()` method of the
shared variable. If `False`, a copy will always be returned.
Returns
-------
params : list
A list of `numpy.ndarray` objects containing the current
parameters of the model.
"""
assert not isinstance(self.get_params(), set)
return [param.get_value(borrow=borrow) for param in self.get_params()]
def set_param_values(self, values, borrow=False):
"""
Sets the values of the parameters that define the model
Parameters
----------
values : list
list of ndarrays
borrow : bool
The `borrow` flag to use with `set_value`.
"""
for param, value in zip(self.get_params(), values):
param.set_value(value, borrow=borrow)
def get_param_vector(self):
"""
Returns all parameters flattened into a single vector.
Returns
-------
params : ndarray
1-D array of all parameter values.
"""
values = self.get_param_values()
values = [value.reshape(value.size) for value in values]
return np.concatenate(values, axis=0)
def set_param_vector(self, vector):
"""
Sets all parameters from a single flat vector. Format is consistent
with `get_param_vector`.
Parameters
----------
vector : ndarray
1-D array of all parameter values.
"""
params = self.get_params()
cur_values = self.get_param_values()
pos = 0
for param, value in safe_zip(params, cur_values):
size = value.size
new_value = vector[pos:pos + size]
param.set_value(new_value.reshape(*value.shape))
pos += size
assert pos == vector.size
def redo_theano(self):
"""
Re-compiles all Theano functions used internally by the model.
Notes
-----
This function is often called after a model is unpickled from
disk, since Theano functions are not pickled. However, it is
not always called. This allows scripts like show_weights.py
to rapidly unpickle a model and inspect its weights without
needing to recompile all of its learning machinery.
All Theano functions compiled by this method should be registered
with the register_names_to_del method.
"""
pass
def get_input_dim(self):
"""
Returns the number of visible units of the model.
Deprecated; this assumes the model operates on a vector.
Use get_input_space instead.
This method may be removed on or after 2015-05-25.
"""
raise NotImplementedError()
def get_output_dim(self):
"""
Returns the number of visible units of the model.
Deprecated; this assumes the model operates on a vector.
Use get_input_space instead.
This method may be removed on or after 2015-05-25.
"""
raise NotImplementedError()
def __getstate__(self):
"""
This is the method that pickle/cPickle uses to determine what
portion of the model to serialize. We remove all fields listed in
`self.fields_to_del`. In particular, this should include all Theano
functions, since they do not play nice with pickling.
"""
self._disallow_censor_updates()
d = OrderedDict()
names_to_del = getattr(self, 'names_to_del', set())
names_to_keep = set(self.__dict__.keys()).difference(names_to_del)
for name in names_to_keep:
d[name] = self.__dict__[name]
return d
def get_test_batch_size(self):
"""
Specifies the batch size to use with compute.test_value
Returns
-------
test_batch_size : int
Number of examples to use in batches with compute.test_value
Notes
-----
        The model specifies the number of examples in case it needs a fixed
        batch size or to keep the memory usage of testing under control.
"""
return self._test_batch_size
def print_versions(self, print_theano_config=False):
"""
Print version of the various Python packages and basic information
about the experiment setup (e.g. cpu, os)
Parameters
----------
print_theano_config : bool
TODO WRITEME
Notes
-----
Example output:
.. code-block:: none
numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0
CPU: x86_64
OS: Linux-2.6.35.14-106.fc14.x86_64-x86_64-with-fedora-14-Laughlin
"""
self.libv.print_versions()
self.libv.print_exp_env_info(print_theano_config)
def register_names_to_del(self, names):
"""
Register names of fields that should not be pickled.
Parameters
----------
names : iterable
            A collection of strings indicating names of fields on this
object that should not be pickled.
Notes
-----
All names registered will be deleted from the dictionary returned
by the model's `__getstate__` method (unless a particular model
overrides this method).
"""
if isinstance(names, six.string_types):
names = [names]
try:
assert all(isinstance(n, six.string_types) for n in iter(names))
except (TypeError, AssertionError):
reraise_as(ValueError('Invalid names argument'))
# Quick check in case __init__ was never called, e.g. by a derived
# class.
if not hasattr(self, 'names_to_del'):
self.names_to_del = set()
self.names_to_del = self.names_to_del.union(names)
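    # Typical use (sketch): a subclass that compiles a Theano function in
    # redo_theano() could exclude it from pickling with something like
    #
    #     self.register_names_to_del(['_compiled_fn'])
    #
    # where '_compiled_fn' is a hypothetical attribute name.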
def enforce_constraints(self):
"""
Enforces all constraints encoded by self.modify_updates.
"""
params = self.get_params()
updates = OrderedDict(izip_no_length_check(params, params))
self.modify_updates(updates)
f = function([], updates=updates)
f()
@property
def tag(self):
"""
A "scratch-space" for storing model metadata.
Returns
-------
tag : defaultdict
A defaultdict with "dict" as the default constructor. This
lets you do things like `model.tag[ext_name][quantity_name]`
without the annoyance of first initializing the dict
`model.tag[ext_name]`.
Notes
-----
Nothing critical to the implementation of a particular model or
training algorithm in the library should get stored in `tag`. This
is mainly for extensions or user code to take advantage of, and
segregate such things from actual model implementation attributes.
"""
if not hasattr(self, '_tag'):
self._tag = defaultdict(dict)
return self._tag
| bsd-3-clause | -817,098,115,755,556,100 | 32.386831 | 79 | 0.591355 | false |
shakamunyi/tensorflow | tensorflow/contrib/keras/api/keras/losses/__init__.py | 75 | 2203 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.python.keras._impl.keras.losses import binary_crossentropy
from tensorflow.python.keras._impl.keras.losses import categorical_crossentropy
from tensorflow.python.keras._impl.keras.losses import categorical_hinge
from tensorflow.python.keras._impl.keras.losses import cosine_proximity
from tensorflow.python.keras._impl.keras.losses import hinge
from tensorflow.python.keras._impl.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras._impl.keras.losses import logcosh
from tensorflow.python.keras._impl.keras.losses import mean_absolute_error
from tensorflow.python.keras._impl.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras._impl.keras.losses import mean_squared_error
from tensorflow.python.keras._impl.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras._impl.keras.losses import poisson
from tensorflow.python.keras._impl.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras._impl.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.losses import deserialize
from tensorflow.python.keras._impl.keras.losses import serialize
from tensorflow.python.keras._impl.keras.losses import get
del absolute_import
del division
del print_function
| apache-2.0 | -432,928,622,580,595,700 | 47.955556 | 86 | 0.781661 | false |
simone-f/qat_script | tools/data/Osmose/Osmose_prefs_gui.py | 1 | 2151 | #! /usr/bin/env jython
# -*- coding: utf-8 -*-
"""Preferences for Osmose tool
"""
from javax.swing import JPanel, JLabel, JTextField, JComboBox
from java.awt import GridLayout
from java.lang import Integer, NumberFormatException
class PrefsPanel(JPanel):
"""JPanle with gui for tool preferences
"""
def __init__(self, app):
strings = app.strings
self.setLayout(GridLayout(3, 2, 5, 5))
userLbl = JLabel(strings.getString("osmose_pref_username"))
self.userTextField = JTextField(20)
self.userTextField.setToolTipText(strings.getString("osmose_pref_username_tooltip"))
levelLbl = JLabel(strings.getString("osmose_pref_level"))
self.levels = ["1", "1,2", "1,2,3", "2", "3"]
self.levelsCombo = JComboBox(self.levels)
self.levelsCombo.setToolTipText(strings.getString("osmose_pref_level_tooltip"))
limitLbl = JLabel(strings.getString("osmose_pref_limit"))
self.limitTextField = JTextField(20)
self.limitTextField.setToolTipText(strings.getString("osmose_pref_limit_tooltip"))
self.add(userLbl)
self.add(self.userTextField)
self.add(levelLbl)
self.add(self.levelsCombo)
self.add(limitLbl)
self.add(self.limitTextField)
def update_gui(self, preferences):
"""Update preferences gui
"""
self.userTextField.setText(preferences["username"])
self.levelsCombo.setSelectedIndex(self.levels.index(preferences["level"]))
self.limitTextField.setText(str(preferences["limit"]))
def read_gui(self):
"""Read preferences from gui
"""
username = self.userTextField.getText()
level = self.levelsCombo.getSelectedItem()
limit = self.limitTextField.getText()
try:
limit = Integer.parseInt(limit)
if limit > 500:
limit = 500
limit = str(limit)
except NumberFormatException:
limit = ""
preferences = {"username": username.strip(),
"level": level,
"limit": limit}
return preferences
| gpl-2.0 | -984,214,471,240,011,800 | 33.142857 | 92 | 0.624826 | false |
guillermo-sentinella/jaeger-client-python | jaeger_client/local_agent_net.py | 1 | 3685 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from builtins import object
from threadloop import ThreadLoop
import tornado
import tornado.httpclient
from tornado.httputil import url_concat
from .TUDPTransport import TUDPTransport
from concurrent.futures import Future
from thrift.transport.TTransport import TBufferedTransport
class LocalAgentHTTP(object):
def __init__(self, host, port):
self.agent_http_host = host
self.agent_http_port = int(port)
def request_sampling_strategy(self, service_name, timeout):
http_client = tornado.httpclient.AsyncHTTPClient(
defaults=dict(request_timeout=timeout))
# Properly url encode the params
url = url_concat(
'http://%s:%d/sampling' % (self.agent_http_host, self.agent_http_port),
[('service', service_name)])
return http_client.fetch(url)
class LocalAgentSender(TBufferedTransport):
"""
    LocalAgentSender implements everything necessary to communicate with
    the local jaeger-agent. This class is designed to work in tornado and
    non-tornado environments. If in tornado, pass in the ioloop; if not,
    LocalAgentSender will create one for itself.
NOTE: LocalAgentSender derives from TBufferedTransport. This will buffer
up all written data until flush() is called. Flush gets called at the
end of the batch span submission call.
"""
def __init__(self, host, sampling_port, reporting_port, io_loop=None):
# IOLoop
self._thread_loop = None
self.io_loop = io_loop or self._create_new_thread_loop()
# http sampling
self.local_agent_http = LocalAgentHTTP(host, sampling_port)
# udp reporting - this will only get written to after our flush() call.
# We are buffering things up because we are a TBufferedTransport.
udp = TUDPTransport(host, reporting_port)
TBufferedTransport.__init__(self, udp)
def _create_new_thread_loop(self):
"""
Create a daemonized thread that will run Tornado IOLoop.
:return: the IOLoop backed by the new thread.
"""
self._thread_loop = ThreadLoop()
if not self._thread_loop.is_ready():
self._thread_loop.start()
return self._thread_loop._io_loop
def readFrame(self):
"""Empty read frame that is never ready"""
return Future()
# Pass-through for the http
def request_sampling_strategy(self, service_name, timeout):
return self.local_agent_http.request_sampling_strategy(
service_name, timeout)
| mit | 2,413,121,204,964,829,000 | 39.944444 | 83 | 0.70882 | false |
Tejas-Khot/deep-learning | test/nnet/fflayers.py | 7 | 2390 | """Feed-forward Layers (not including ConvNet Layer)
This module contains feedforward layers for
+ Identity layer
+ Tanh layer
+ Sigmoid layer
+ ReLU layer
+ Softmax layer
"""
import theano.tensor as T;
import telaugesa.nnfuns as nnfuns;
from telaugesa.layer import Layer;
class IdentityLayer(Layer):
"""Identity Layer
"""
def __init__(self, **kwargs):
super(IdentityLayer, self).__init__(**kwargs);
def apply(self, X):
return self.apply_lin(X);
class TanhLayer(Layer):
"""Tanh Layer
"""
def __init__(self, **kwargs):
super(TanhLayer, self).__init__(**kwargs);
self.initialize("tanh");
def apply(self, X):
return nnfuns.tanh(self.apply_lin(X));
class SigmoidLayer(Layer):
"""Sigmoid Layer"""
def __init__(self, **kwargs):
super(SigmoidLayer, self).__init__(**kwargs);
self.initialize("sigmoid");
def apply(self, X):
return nnfuns.sigmoid(self.apply_lin(X));
class ReLULayer(Layer):
"""ReLU Layer"""
def __init__(self, **kwargs):
super(ReLULayer, self).__init__(**kwargs);
def apply(self, X):
return nnfuns.relu(self.apply_lin(X));
class SoftmaxLayer(Layer):
"""Softmax Layer"""
def __init__(self, **kwargs):
super(SoftmaxLayer, self).__init__(**kwargs);
def apply(self, X):
return nnfuns.softmax(self.apply_lin(X));
def predict(self, X_out):
"""Predict label
Parameters
----------
X_out : matrix
input sample outputs, the size is (number of cases, number of classes)
Returns
-------
Y_pred : vector
predicted label, the size is (number of cases)
"""
return T.argmax(X_out, axis=1);
def error(self, X_out, Y):
"""Mis-classified label
Parameters
----------
        X_out : matrix
            predicted outputs, the size is (number of cases, number of classes)
Y : vector
correct labels, the size is (number of cases)
Returns
-------
error : scalar
difference between predicted label and true label.
"""
return T.mean(T.neq(self.predict(X_out), Y)); | gpl-2.0 | -935,516,110,150,137,200 | 22.91 | 82 | 0.533054 | false |
eayunstack/python-cinderclient | cinderclient/extension.py | 10 | 1409 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
from cinderclient import utils
class Extension(utils.HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
def __init__(self, name, module):
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in list(self.module.__dict__.items()):
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
elif utils.safe_issubclass(attr_value, base.Manager):
self.manager_class = attr_value
def __repr__(self):
return "<Extension '%s'>" % self.name
| apache-2.0 | -1,040,650,254,897,088,000 | 35.128205 | 78 | 0.663591 | false |
sai16vicky/deepdive | examples/test/test_smoke.py | 17 | 1907 | import os
import getpass
import psycopg2
from sets import Set
# Error threshold
eps = 0.5
# Get the environment variables
DBNAME = os.environ['DBNAME']
PGUSER = os.environ['PGUSER']
PGPASSWORD = os.environ['PGPASSWORD']
PGHOST = os.environ['PGHOST']
PGPORT = os.environ['PGPORT']
# Standard results
std = dict([])
std_res = open('smoke.dat', 'r')
for row in std_res:
if (len(row) < 2): continue
dat = row.strip().split(' ')
std[str(dat[0])] = dat[1]
std_res.close()
# Connect to the database
conn = psycopg2.connect(database = DBNAME, user = PGUSER, password = PGPASSWORD, host = PGHOST, port = PGPORT)
cur = conn.cursor()
# Check table status
cur.execute("SELECT COUNT(*) FROM friends")
for row in cur.fetchall(): num = row[0]
if (std["friends"] != str(num)):
print "Error in Table friends"
exit(0)
cur.execute("SELECT COUNT(*) FROM person")
for row in cur.fetchall(): num = row[0]
if (std["person"] != str(num)):
print "Error in Table person"
exit(0)
cur.execute("SELECT COUNT(*) FROM person_has_cancer")
for row in cur.fetchall(): num = row[0]
if (std["person_has_cancer"] != str(num)):
print "Error in Table person_has_cancer"
exit(0)
cur.execute("SELECT COUNT(*) FROM person_smokes")
for row in cur.fetchall(): num = row[0]
if (std["person_smokes"] != str(num)):
print "Error in Table person_smokes"
exit(0)
# Check result
cur.execute("SELECT person_id, expectation FROM person_has_cancer_has_cancer_inference")
rows = cur.fetchall()
for row in rows:
if (abs(float(std["person_has_cancer_" + str(row[0])]) - float(row[1])) > eps):
print "Error result!"
exit(0)
cur.execute("SELECT person_id, expectation FROM person_smokes_smokes_inference")
rows = cur.fetchall()
for row in rows:
if (abs(float(std["person_smokes_" + str(row[0])]) - float(row[1])) > eps):
print "Error result!"
exit(0)
print "Test passed!"
| apache-2.0 | -6,854,156,015,969,028,000 | 24.092105 | 110 | 0.657577 | false |
atosatto/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_facts.py | 68 | 7702 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachineimage_facts
version_added: "2.1"
short_description: Get virtual machine image facts.
description:
- Get facts for virtual machine images.
options:
name:
description:
            - Only show results for a specific image.
default: null
required: false
location:
description:
- Azure location value (ie. westus, eastus, eastus2, northcentralus, etc.). Supplying only a
location value will yield a list of available publishers for the location.
required: true
publisher:
description:
- Name of an image publisher. List image offerings associated with a particular publisher.
default: null
required: false
offer:
description:
- Name of an image offering. Combine with sku to see a list of available image versions.
default: null
required: false
sku:
description:
- Image offering SKU. Combine with offer to see a list of available versions.
default: null
required: false
version:
description:
- Specific version number of an image.
default: null
required: false
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for a specific image
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
version: '7.1.20160308'
- name: List available versions
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
- name: List available offers
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
- name: List available publishers
azure_rm_virtualmachineimage_facts:
location: eastus
'''
RETURN = '''
azure_vmimages:
description: List of image dicts.
returned: always
type: list
example: []
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models.compute_management_client_enums']
class AzureRMVirtualMachineImageFacts(AzureRMModuleBase):
def __init__(self, **kwargs):
self.module_arg_spec = dict(
location=dict(type='str', required=True),
publisher=dict(type='str'),
offer=dict(type='str'),
sku=dict(type='str'),
version=dict(type='str')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_vmimages=[])
)
self.location = None
self.publisher = None
self.offer = None
self.sku = None
self.version = None
super(AzureRMVirtualMachineImageFacts, self).__init__(self.module_arg_spec)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.location and self.publisher and self.offer and self.sku and self.version:
self.results['ansible_facts']['azure_vmimages'] = self.get_item()
elif self.location and self.publisher and self.offer and self.sku:
self.results['ansible_facts']['azure_vmimages'] = self.list_images()
elif self.location and self.publisher:
self.results['ansible_facts']['azure_vmimages'] = self.list_offers()
elif self.location:
self.results['ansible_facts']['azure_vmimages'] = self.list_publishers()
return self.results
def get_item(self):
item = None
result = []
try:
item = self.compute_client.virtual_machine_images.get(self.location,
self.publisher,
self.offer,
self.sku,
self.version)
except CloudError:
pass
if item:
result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)]
return result
def list_images(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list(self.location,
self.publisher,
self.offer,
self.sku,)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list images: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def list_offers(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list_offers(self.location,
self.publisher)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list offers: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def list_publishers(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list_publishers(self.location)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list publishers: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def main():
AzureRMVirtualMachineImageFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,666,786,931,940,447,000 | 30.565574 | 104 | 0.570371 | false |
a-doumoulakis/tensorflow | tensorflow/contrib/layers/python/layers/regularizers_test.py | 73 | 6622 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RegularizerTest(test.TestCase):
def test_l1(self):
with self.assertRaises(ValueError):
regularizers.l1_regularizer(-1.)
with self.assertRaises(ValueError):
regularizers.l1_regularizer(0)
self.assertIsNone(regularizers.l1_regularizer(0.)(None))
values = np.array([1., -1., 4., 2.])
weights = constant_op.constant(values)
with session.Session() as sess:
result = sess.run(regularizers.l1_regularizer(.5)(weights))
self.assertAllClose(np.abs(values).sum() * .5, result)
def test_l2(self):
with self.assertRaises(ValueError):
regularizers.l2_regularizer(-1.)
with self.assertRaises(ValueError):
regularizers.l2_regularizer(0)
self.assertIsNone(regularizers.l2_regularizer(0.)(None))
values = np.array([1., -1., 4., 2.])
weights = constant_op.constant(values)
with session.Session() as sess:
result = sess.run(regularizers.l2_regularizer(.42)(weights))
self.assertAllClose(np.power(values, 2).sum() / 2.0 * .42, result)
def test_l1_l2(self):
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(-1., 0.5)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0.5, -1.)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0, 0.5)
with self.assertRaises(ValueError):
regularizers.l1_l2_regularizer(0.5, 0)
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(1.0, 1.0)(tensor)
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def test_l1_l2_scale_l1Zero(self):
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(0.0, 1.0)(tensor)
with self.test_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
def test_l1_l2_scale_l2Zero(self):
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(1.0, 0.0)(tensor)
with self.test_session():
self.assertEquals(loss.op.name, 'l1_l2_regularizer')
self.assertAlmostEqual(loss.eval(), num_elem, 5)
def test_l1_l2_scales_Zero(self):
shape = [5, 5, 5]
tensor = constant_op.constant(1.0, shape=shape)
loss = regularizers.l1_l2_regularizer(0.0, 0.0)(tensor)
self.assertEquals(loss, None)
def testL1L2RegularizerWithScope(self):
with self.test_session():
shape = [5, 5, 5]
num_elem = 5 * 5 * 5
tensor = constant_op.constant(1.0, shape=shape)
with ops.name_scope('foo'):
loss = regularizers.l1_l2_regularizer(1.0, 1.0, scope='l1_l2')(tensor)
self.assertEquals(loss.op.name, 'foo/l1_l2')
self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5)
def test_sum_regularizer(self):
l1_function = regularizers.l1_regularizer(.1)
l2_function = regularizers.l2_regularizer(.2)
self.assertIsNone(regularizers.sum_regularizer([]))
self.assertIsNone(regularizers.sum_regularizer([None]))
values = np.array([-3.])
weights = constant_op.constant(values)
with session.Session() as sess:
l1_reg1 = regularizers.sum_regularizer([l1_function])
l1_result1 = sess.run(l1_reg1(weights))
l1_reg2 = regularizers.sum_regularizer([l1_function, None])
l1_result2 = sess.run(l1_reg2(weights))
l1_l2_reg = regularizers.sum_regularizer([l1_function, l2_function])
l1_l2_result = sess.run(l1_l2_reg(weights))
self.assertAllClose(.1 * np.abs(values).sum(), l1_result1)
self.assertAllClose(.1 * np.abs(values).sum(), l1_result2)
self.assertAllClose(
.1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0,
l1_l2_result)
def test_apply_regularization(self):
dummy_regularizer = lambda x: math_ops.reduce_sum(2 * x)
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
expected = sum([2 * x for l in array_weights_list for x in l])
with self.test_session():
result = regularizers.apply_regularization(dummy_regularizer,
tensor_weights_list)
self.assertAllClose(expected, result.eval())
def test_apply_zero_regularization(self):
regularizer = regularizers.l2_regularizer(0.0)
array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
with self.test_session():
result = regularizers.apply_regularization(regularizer,
tensor_weights_list)
self.assertAllClose(0.0, result.eval())
def test_apply_regularization_invalid_regularizer(self):
non_scalar_regularizer = lambda x: array_ops.tile(x, [2])
tensor_weights_list = [
constant_op.constant(x) for x in [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
]
with self.test_session():
with self.assertRaises(ValueError):
regularizers.apply_regularization(non_scalar_regularizer,
tensor_weights_list)
if __name__ == '__main__':
test.main()
| apache-2.0 | -3,250,233,736,248,000,500 | 37.725146 | 80 | 0.658562 | false |
MIGreenberg/NFLPredict | scripts/correlation.py | 1 | 1485 | import nflgame
import math
import operator
from classes.league import *
from scipy.stats.stats import pearsonr
def statistical_correlation(statistics, performances):
stats_a = []
stats_b = []
for performance in performances:
stats_a.append(performance.statistics[statistics[0]])
stats_b.append(performance.statistics[statistics[1]])
pearson = pearsonr(stats_a,stats_b)
return pearson[0]
stats_to_query = [
# "net_total_yards",
# "rushing_yards",
# "top",
# "passing_yards",
# "first_downs",
# "int_thrown",
# "int_caught",
# "tot_fumbles",
# "def_fumbles_rec",
# "fumbles_lost",
# "third_down_efficiency",
# "completion_percentage",
# "yards_per_attempt",
# "yards_per_completion",
# "sacks_taken",
# "sacks",
# "yards_per_drive",
# "turnover_differential"
# "avg_starting_pos",
# "opp_avg_starting_pos"
"home",
"total_yards",
"opp_total_yards",
"rushing_yards",
"passing_yards",
"penalty_yards",
"fourth_down_efficiency",
"yards_per_rush",
"opp_rushing_yards",
"opp_passing_yards",
"opp_first_downs",
"opp_yards_per_drive",
"opp_yards_per_rush",
"opp_yards_per_completion",
"opp_yards_per_attempt",
"opp_fourth_down_efficiency"
]
if __name__ == '__main__':
league = League()
league.record_performances_for_years(range(2009,2015))
stat_correlation = {}
for stat in stats_to_query:
correlation = statistical_correlation(('net_score', stat), league.performances)
stat_correlation[stat] = correlation
print stat_correlation
| mit | 6,581,912,810,090,876,000 | 20.838235 | 81 | 0.687542 | false |
sarvex/depot-tools | third_party/boto/services/bs.py | 115 | 8115 | #!/usr/bin/env python
# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from optparse import OptionParser
from boto.services.servicedef import ServiceDef
from boto.services.submit import Submitter
from boto.services.result import ResultProcessor
import boto
import sys, os, StringIO
class BS(object):
Usage = "usage: %prog [options] config_file command"
Commands = {'reset' : 'Clear input queue and output bucket',
'submit' : 'Submit local files to the service',
'start' : 'Start the service',
'status' : 'Report on the status of the service buckets and queues',
'retrieve' : 'Retrieve output generated by a batch',
'batches' : 'List all batches stored in current output_domain'}
def __init__(self):
self.service_name = None
self.parser = OptionParser(usage=self.Usage)
self.parser.add_option("--help-commands", action="store_true", dest="help_commands",
help="provides help on the available commands")
self.parser.add_option("-a", "--access-key", action="store", type="string",
help="your AWS Access Key")
self.parser.add_option("-s", "--secret-key", action="store", type="string",
help="your AWS Secret Access Key")
self.parser.add_option("-p", "--path", action="store", type="string", dest="path",
help="the path to local directory for submit and retrieve")
self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair",
help="the SSH keypair used with launched instance(s)")
self.parser.add_option("-l", "--leave", action="store_true", dest="leave",
                               help="leave the files (don't retrieve them) during the retrieve command")
self.parser.set_defaults(leave=False)
self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances",
help="the number of launched instance(s)")
self.parser.set_defaults(num_instances=1)
self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore",
help="directories that should be ignored by submit command")
self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch",
help="batch identifier required by the retrieve command")
def print_command_help(self):
print '\nCommands:'
for key in self.Commands.keys():
print ' %s\t\t%s' % (key, self.Commands[key])
def do_reset(self):
iq = self.sd.get_obj('input_queue')
if iq:
print 'clearing out input queue'
i = 0
m = iq.read()
while m:
i += 1
iq.delete_message(m)
m = iq.read()
print 'deleted %d messages' % i
ob = self.sd.get_obj('output_bucket')
ib = self.sd.get_obj('input_bucket')
if ob:
if ib and ob.name == ib.name:
return
print 'delete generated files in output bucket'
i = 0
for k in ob:
i += 1
k.delete()
print 'deleted %d keys' % i
def do_submit(self):
if not self.options.path:
self.parser.error('No path provided')
if not os.path.exists(self.options.path):
self.parser.error('Invalid path (%s)' % self.options.path)
s = Submitter(self.sd)
t = s.submit_path(self.options.path, None, self.options.ignore, None,
None, True, self.options.path)
print 'A total of %d files were submitted' % t[1]
print 'Batch Identifier: %s' % t[0]
def do_start(self):
ami_id = self.sd.get('ami_id')
instance_type = self.sd.get('instance_type', 'm1.small')
security_group = self.sd.get('security_group', 'default')
if not ami_id:
self.parser.error('ami_id option is required when starting the service')
ec2 = boto.connect_ec2()
if not self.sd.has_section('Credentials'):
self.sd.add_section('Credentials')
self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
s = StringIO.StringIO()
self.sd.write(s)
rs = ec2.get_all_images([ami_id])
img = rs[0]
r = img.run(user_data=s.getvalue(), key_name=self.options.keypair,
max_count=self.options.num_instances,
instance_type=instance_type,
security_groups=[security_group])
print 'Starting AMI: %s' % ami_id
print 'Reservation %s contains the following instances:' % r.id
for i in r.instances:
print '\t%s' % i.id
def do_status(self):
iq = self.sd.get_obj('input_queue')
if iq:
print 'The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count())
ob = self.sd.get_obj('output_bucket')
ib = self.sd.get_obj('input_bucket')
if ob:
if ib and ob.name == ib.name:
return
total = 0
for k in ob:
total += 1
print 'The output_bucket (%s) contains %d keys' % (ob.name, total)
def do_retrieve(self):
if not self.options.path:
self.parser.error('No path provided')
if not os.path.exists(self.options.path):
self.parser.error('Invalid path (%s)' % self.options.path)
if not self.options.batch:
self.parser.error('batch identifier is required for retrieve command')
s = ResultProcessor(self.options.batch, self.sd)
s.get_results(self.options.path, get_file=(not self.options.leave))
def do_batches(self):
d = self.sd.get_obj('output_domain')
if d:
print 'Available Batches:'
rs = d.query("['type'='Batch']")
for item in rs:
print ' %s' % item.name
else:
self.parser.error('No output_domain specified for service')
def main(self):
self.options, self.args = self.parser.parse_args()
if self.options.help_commands:
self.print_command_help()
sys.exit(0)
if len(self.args) != 2:
self.parser.error("config_file and command are required")
self.config_file = self.args[0]
self.sd = ServiceDef(self.config_file)
self.command = self.args[1]
if hasattr(self, 'do_%s' % self.command):
method = getattr(self, 'do_%s' % self.command)
method()
else:
self.parser.error('command (%s) not recognized' % self.command)
if __name__ == "__main__":
bs = BS()
bs.main()
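# Illustrative invocations (hypothetical config file and paths, not part of the
# module): the first positional argument is the service config file and the
# second is one of the commands listed in BS.Commands.
#
#   python bs.py myservice.cfg submit -p ./input_files
#   python bs.py myservice.cfg status
#   python bs.py myservice.cfg retrieve -p ./results -b <batch-id>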
| bsd-3-clause | 3,883,919,502,657,512,400 | 44.335196 | 108 | 0.582994 | false |
marco-lancini/Showcase | django/contrib/localflavor/jp/forms.py | 333 | 1211 | """
JP-specific Form helpers
"""
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import RegexField, Select
class JPPostalCodeField(RegexField):
"""
A form field that validates its input is a Japanese postcode.
Accepts 7 digits, with or without a hyphen.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXXXX or XXX-XXXX.'),
}
def __init__(self, *args, **kwargs):
super(JPPostalCodeField, self).__init__(r'^\d{3}-\d{4}$|^\d{7}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(JPPostalCodeField, self).clean(value)
return v.replace('-', '')
class JPPrefectureSelect(Select):
"""
A Select widget that uses a list of Japanese prefectures as its choices.
"""
def __init__(self, attrs=None):
from jp_prefectures import JP_PREFECTURES
super(JPPrefectureSelect, self).__init__(attrs, choices=JP_PREFECTURES)
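# Illustrative values (hypothetical form input, not part of this module):
# JPPostalCodeField accepts seven digits with or without a hyphen and
# normalizes the cleaned value to digits only.
#
#   field = JPPostalCodeField()
#   field.clean('100-0001')   # -> '1000001'
#   field.clean('1000001')    # -> '1000001'
#   field.clean('100-001')    # raises ValidationError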
| mit | 2,722,479,080,661,496,000 | 31.72973 | 79 | 0.648225 | false |
Athrun29/horizon | openstack_dashboard/contrib/trove/content/database_backups/views.py | 19 | 4383 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for displaying database backups.
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables as horizon_tables
from horizon.utils import filters
from horizon import views as horizon_views
from horizon import workflows as horizon_workflows
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.contrib.trove.content.database_backups import tables
from openstack_dashboard.contrib.trove.content.database_backups \
import workflows
class IndexView(horizon_tables.DataTableView):
table_class = tables.BackupsTable
template_name = 'project/database_backups/index.html'
page_title = _("Backups")
def _get_extra_data(self, backup):
"""Apply extra info to the backup."""
instance_id = backup.instance_id
# TODO(rdopieralski) It's not clear where this attribute is supposed
# to come from. At first glance it looks like it will always be {}.
if not hasattr(self, '_instances'):
self._instances = {}
instance = self._instances.get(instance_id)
if instance is None:
try:
instance = api.trove.instance_get(self.request, instance_id)
except Exception:
instance = _('Not Found')
backup.instance = instance
return backup
def get_data(self):
# TODO(rmyers) Add pagination support after it is available
# https://blueprints.launchpad.net/trove/+spec/paginate-backup-list
try:
backups = api.trove.backup_list(self.request)
backups = map(self._get_extra_data, backups)
except Exception:
backups = []
msg = _('Error getting database backup list.')
exceptions.handle(self.request, msg)
return backups
class BackupView(horizon_workflows.WorkflowView):
workflow_class = workflows.CreateBackup
template_name = "project/database_backups/backup.html"
page_title = _("Backup Database")
def get_context_data(self, **kwargs):
context = super(BackupView, self).get_context_data(**kwargs)
context["instance_id"] = kwargs.get("instance_id")
self._instance = context['instance_id']
return context
class DetailView(horizon_views.APIView):
template_name = "project/database_backups/details.html"
page_title = _("Backup Details: {{ backup.name }}")
def get_data(self, request, context, *args, **kwargs):
backup_id = kwargs.get("backup_id")
try:
backup = api.trove.backup_get(request, backup_id)
created_at = filters.parse_isotime(backup.created)
updated_at = filters.parse_isotime(backup.updated)
backup.duration = updated_at - created_at
except Exception:
redirect = reverse('horizon:project:database_backups:index')
msg = _('Unable to retrieve details for backup: %s') % backup_id
exceptions.handle(self.request, msg, redirect=redirect)
try:
if(hasattr(backup, 'parent_id') and backup.parent_id is not None):
backup.parent = api.trove.backup_get(request, backup.parent_id)
except Exception:
redirect = reverse('horizon:project:database_backups:index')
msg = (_('Unable to retrieve details for parent backup: %s')
% backup.parent_id)
exceptions.handle(self.request, msg, redirect=redirect)
try:
instance = api.trove.instance_get(request, backup.instance_id)
except Exception:
instance = None
context['backup'] = backup
context['instance'] = instance
return context
| apache-2.0 | 8,503,472,768,327,153,000 | 38.845455 | 79 | 0.660735 | false |
JioCloud/horizon | openstack_dashboard/dashboards/admin/flavors/panel.py | 46 | 1059 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class Flavors(horizon.Panel):
name = _("Flavors")
slug = 'flavors'
permissions = ('openstack.services.compute',)
dashboard.Admin.register(Flavors)
| apache-2.0 | -1,642,987,502,206,253,000 | 32.09375 | 78 | 0.744098 | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rpcore/render_target.py | 1 | 14841 | """
RenderTarget
Copyright (c) 2015 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function, division
from panda3d.core import GraphicsOutput, Texture, AuxBitplaneAttrib, NodePath
from panda3d.core import Vec4, TransparencyAttrib, ColorWriteAttrib, SamplerState
from panda3d.core import WindowProperties, FrameBufferProperties, GraphicsPipe
from panda3d.core import LVecBase2i
from rplibs.six.moves import range
from rplibs.six import iterkeys, itervalues
from rpcore.globals import Globals
from rpcore.rpobject import RPObject
from rpcore.util.post_process_region import PostProcessRegion
__all__ = "RenderTarget",
__version__ = "2.0"
class setter(object): # pylint: disable=C0103
""" Setter only property """
def __init__(self, func):
self.__func = func
self.__doc__ = func.__doc__
def __set__(self, name, value):
return self.__func(name, value)
class RenderTarget(RPObject):
""" Second version of the RenderTarget library, provides functions
to easily setup buffers in Panda3D. """
NUM_ALLOCATED_BUFFERS = 0
USE_R11G11B10 = True
REGISTERED_TARGETS = []
def __init__(self, name="target"):
RPObject.__init__(self, name)
self._targets = {}
self._color_bits = (0, 0, 0, 0)
self._aux_bits = 8
self._aux_count = 0
self._depth_bits = 0
self._size = LVecBase2i(-1, -1)
self._source_window = Globals.base.win
self._source_region = None
self._active = False
self._internal_buffer = None
# Public attributes
self.engine = Globals.base.graphicsEngine
self.support_transparency = False
self.create_default_region = True
# Disable all global clears, since they are not required
for region in Globals.base.win.get_display_regions():
region.disable_clears()
def add_color_attachment(self, bits=8, alpha=False):
""" Adds a new color attachment with the given amount of bits, bits can
be either a single int or a tuple determining the bits. If bits is a
single int, alpha determines whether alpha bits are requested """
self._targets["color"] = Texture(self.debug_name + "_color")
if isinstance(bits, (list, tuple)):
self._color_bits = (bits[0], bits[1], bits[2], bits[3] if len(bits) == 4 else 0)
else:
self._color_bits = ((bits, bits, bits, (bits if alpha else 0)))
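    # Illustrative calls (bit layouts here are assumptions, not defaults):
    #   target.add_color_attachment(bits=16, alpha=True)    # RGBA16 attachment
    #   target.add_color_attachment(bits=(11, 11, 10, 0))   # packed HDR layout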
def add_depth_attachment(self, bits=32):
""" Adds a depth attachment wit the given amount of bits """
self._targets["depth"] = Texture(self.debug_name + "_depth")
self._depth_bits = bits
def add_aux_attachment(self, bits=8):
""" Adds a new aux attachment with the given amount of bits. The amount
of bits passed overrides all previous bits set, since all aux textures
have to have the same amount of bits. """
self._aux_bits = bits
self._aux_count += 1
def add_aux_attachments(self, bits=8, count=1):
""" Adds n new aux attachments, with the given amount of bits. All
        previously set aux bits are overridden, since all aux textures have to
have the same amount of bits """
self._aux_bits = bits
self._aux_count += count
@setter
def size(self, *args):
""" Sets the render target size. This can be either a single integer,
in which case it applies to both dimensions. Negative integers cause
the render target to be proportional to the screen size, i.e. a value
of -4 produces a quarter resolution target, a value of -2 a half
resolution target, and a value of -1 a full resolution target
(the default). """
self._size = LVecBase2i(*args)
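    # Illustrative values (following the docstring semantics above):
    #   target.size = -1    # full resolution
    #   target.size = -2    # half resolution
    #   target.size = 256   # fixed 256x256 target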
@property
def active(self):
""" Returns whether the target is currently active """
return self._active
@active.setter
def active(self, flag):
""" Sets whether the target is active, this just propagates the active
flag to all display regions """
for region in self._internal_buffer.get_display_regions():
region.set_active(flag)
@property
def color_tex(self):
""" Returns the color attachment if present """
return self._targets["color"]
@property
def depth_tex(self):
""" Returns the depth attachment if present """
return self._targets["depth"]
@property
def aux_tex(self):
""" Returns a list of aux textures, can be used like target.aux_tex[2],
notice the indices start at zero, so the first target has the index 0. """
return [self._targets[i] for i in sorted(iterkeys(self._targets)) if i.startswith("aux_")]
def set_shader_input(self, *args, **kwargs):
""" Sets a shader input available to the target """
if self.create_default_region:
self._source_region.set_shader_input(*args, **kwargs)
@setter
def shader(self, shader_obj):
""" Sets a shader on the target """
if not shader_obj:
self.error("shader must not be None!")
return
self._source_region.set_shader(shader_obj)
@property
def internal_buffer(self):
""" Returns a handle to the internal GraphicsBuffer object """
return self._internal_buffer
@property
def targets(self):
""" Returns the dictionary of attachments, whereas the key is the name
of the attachment and the value is the Texture handle of the attachment """
return self._targets
@property
def region(self):
""" Returns the internally used PostProcessRegion """
return self._source_region
def prepare_render(self, camera_np):
""" Prepares to render a scene """
self.create_default_region = False
self._create_buffer()
self._source_region = self._internal_buffer.get_display_region(0)
if camera_np:
initial_state = NodePath("rtis")
initial_state.set_state(camera_np.node().get_initial_state())
if self._aux_count:
initial_state.set_attrib(AuxBitplaneAttrib.make(self._aux_bits), 20)
initial_state.set_attrib(TransparencyAttrib.make(TransparencyAttrib.M_none), 20)
if max(self._color_bits) == 0:
initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 20)
# Disable existing regions of the camera
for region in camera_np.node().get_display_regions():
region.set_active(False)
# Remove the existing display region of the camera
for region in self._source_window.get_display_regions():
if region.get_camera() == camera_np:
self._source_window.remove_display_region(region)
camera_np.node().set_initial_state(initial_state.get_state())
self._source_region.set_camera(camera_np)
self._internal_buffer.disable_clears()
self._source_region.disable_clears()
self._source_region.set_active(True)
self._source_region.set_sort(20)
        # Reenable depth-clear, usually desirable
self._source_region.set_clear_depth_active(True)
self._source_region.set_clear_depth(1.0)
self._active = True
def prepare_buffer(self):
""" Prepares the target to render to an offscreen buffer """
self._create_buffer()
self._active = True
def present_on_screen(self):
""" Prepares the target to render on the main window, to present the
final rendered image """
self._source_region = PostProcessRegion.make(self._source_window)
self._source_region.set_sort(5)
def cleanup(self):
""" Deletes this buffer, restoring the previous state """
self._internal_buffer.clear_render_textures()
self.engine.remove_window(self._internal_buffer)
self._active = False
for target in itervalues(self._targets):
target.release_all()
def set_clear_color(self, *args):
""" Sets the clear color """
self._internal_buffer.set_clear_color_active(True)
self._internal_buffer.set_clear_color(Vec4(*args))
@setter
def instance_count(self, count):
""" Sets the instance count """
self._source_region.set_instance_count(count)
def _create_buffer(self):
""" Internal method to create the buffer object """
if self._source_window == Globals.base.win:
w, h = Globals.resolution.x, Globals.resolution.y
else:
w, h = self._source_window.get_x_size(), self._source_window.get_y_size()
if self._size.x < 0:
self._size.x = (w - self._size.x - 1) // (-self._size.x)
if self._size.y < 0:
self._size.y = (h - self._size.y - 1) // (-self._size.y)
if not self._create():
self.error("Failed to create buffer!")
return False
if self.create_default_region:
self._source_region = PostProcessRegion.make(self._internal_buffer)
if max(self._color_bits) == 0:
self._source_region.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.M_none), 1000)
def _setup_textures(self):
""" Prepares all bound textures """
for i in range(self._aux_count):
self._targets["aux_{}".format(i)] = Texture(
self.debug_name + "_aux{}".format(i))
for tex in itervalues(self._targets):
tex.set_wrap_u(SamplerState.WM_clamp)
tex.set_wrap_v(SamplerState.WM_clamp)
tex.set_anisotropic_degree(0)
tex.set_x_size(self._size.x)
tex.set_y_size(self._size.y)
tex.set_minfilter(SamplerState.FT_linear)
tex.set_magfilter(SamplerState.FT_linear)
def _make_properties(self):
""" Creates the window and buffer properties """
window_props = WindowProperties.size(self._size.x, self._size.y)
buffer_props = FrameBufferProperties()
if self._color_bits == (16, 16, 16, 0):
if RenderTarget.USE_R11G11B10:
buffer_props.set_rgba_bits(11, 11, 10, 0)
else:
buffer_props.set_rgba_bits(*self._color_bits)
elif 8 in self._color_bits:
            # When specifying 8 bits, specify 1 bit; this is a workaround
            # for legacy logic in panda
buffer_props.set_rgba_bits(*[i if i != 8 else 1 for i in self._color_bits])
else:
buffer_props.set_rgba_bits(*self._color_bits)
buffer_props.set_accum_bits(0)
buffer_props.set_stencil_bits(0)
buffer_props.set_back_buffers(0)
buffer_props.set_coverage_samples(0)
buffer_props.set_depth_bits(self._depth_bits)
if self._depth_bits == 32:
buffer_props.set_float_depth(True)
buffer_props.set_float_color(max(self._color_bits) > 8)
buffer_props.set_force_hardware(True)
buffer_props.set_multisamples(0)
buffer_props.set_srgb_color(False)
buffer_props.set_stereo(False)
buffer_props.set_stencil_bits(0)
if self._aux_bits == 8:
buffer_props.set_aux_rgba(self._aux_count)
elif self._aux_bits == 16:
buffer_props.set_aux_hrgba(self._aux_count)
elif self._aux_bits == 32:
buffer_props.set_aux_float(self._aux_count)
else:
self.error("Invalid aux bits")
return window_props, buffer_props
def _create(self):
""" Creates the internally used buffer """
self._setup_textures()
window_props, buffer_props = self._make_properties()
self._internal_buffer = self.engine.make_output(
self._source_window.get_pipe(), self.debug_name, 1,
buffer_props, window_props, GraphicsPipe.BF_refuse_window,
self._source_window.get_gsg(), self._source_window)
if not self._internal_buffer:
self.error("Failed to create buffer")
return
if self._depth_bits:
self._internal_buffer.add_render_texture(
self.depth_tex, GraphicsOutput.RTM_bind_or_copy,
GraphicsOutput.RTP_depth)
if max(self._color_bits) > 0:
self._internal_buffer.add_render_texture(
self.color_tex, GraphicsOutput.RTM_bind_or_copy,
GraphicsOutput.RTP_color)
aux_prefix = {
8: "RTP_aux_rgba_{}",
16: "RTP_aux_hrgba_{}",
32: "RTP_aux_float_{}",
}[self._aux_bits]
for i in range(self._aux_count):
target_mode = getattr(GraphicsOutput, aux_prefix.format(i))
self._internal_buffer.add_render_texture(
self.aux_tex[i], GraphicsOutput.RTM_bind_or_copy, target_mode)
sort = -300 + RenderTarget.NUM_ALLOCATED_BUFFERS * 10
RenderTarget.NUM_ALLOCATED_BUFFERS += 1
self._internal_buffer.set_sort(sort)
self._internal_buffer.disable_clears()
self._internal_buffer.get_display_region(0).disable_clears()
self._internal_buffer.get_overlay_display_region().disable_clears()
self._internal_buffer.get_overlay_display_region().set_active(False)
RenderTarget.REGISTERED_TARGETS.append(self)
return True
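# Illustrative end-to-end sketch (attachment sizes and the shader object are
# assumptions): a typical offscreen pass creates its attachments, prepares the
# buffer, and then binds a shader on the fullscreen region.
#
#   target = RenderTarget("my-pass")
#   target.size = -2
#   target.add_color_attachment(bits=16)
#   target.add_depth_attachment(bits=32)
#   target.prepare_buffer()
#   target.shader = my_shader_object   # assumed to be a loaded Shader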
| mit | 2,700,231,084,393,569,300 | 37.576 | 100 | 0.61128 | false |
catapult-project/catapult | third_party/gsutil/gslib/__main__.py | 4 | 33440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for Google Cloud Storage command line tool."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import datetime
import errno
import getopt
import logging
import os
import re
import signal
import socket
import sys
import textwrap
import traceback
import six
from six.moves import configparser
from six.moves import range
from gslib.utils.version_check import check_python_version_support
# Load the gsutil version number and append it to boto.UserAgent so the value is
# set before anything instantiates boto. This has to run after THIRD_PARTY_DIR
# is modified (done in gsutil.py) but before any calls are made that would cause
# boto.s3.Connection to be loaded - otherwise the Connection class would end up
# with a static reference to the pre-modified version of the UserAgent field,
# so boto requests would not include gsutil/version# in the UserAgent string.
import boto
import gslib
from gslib.utils import system_util
boto.UserAgent += ' gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
if system_util.InvokedViaCloudSdk():
boto.UserAgent += ' google-cloud-sdk'
if system_util.CloudSdkVersion():
boto.UserAgent += '/%s' % system_util.CloudSdkVersion()
# pylint: disable=g-import-not-at-top
# This module also imports boto, and will override the UserAgent global variable
# if imported above.
from gslib import metrics
if metrics.MetricsCollector.IsDisabled():
boto.UserAgent += ' analytics/disabled'
else:
boto.UserAgent += ' analytics/enabled'
# pylint: disable=g-bad-import-order
import httplib2
import oauth2client
from google_reauth import reauth_creds
from gslib import wildcard_iterator
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import ProjectIdException
from gslib.cloud_api import ServiceException
from gslib.command_runner import CommandRunner
import gslib.exception
from gslib.exception import CommandException
from gslib.exception import ControlCException
import apitools.base.py.exceptions as apitools_exceptions
from gslib.utils import boto_util
from gslib.utils import constants
from gslib.utils import system_util
from gslib.utils import text_util
from gslib.sig_handling import GetCaughtSignals
from gslib.sig_handling import InitializeSignalHandling
from gslib.sig_handling import RegisterSignalHandler
CONFIG_KEYS_TO_REDACT = ['proxy', 'proxy_port', 'proxy_user', 'proxy_pass']
# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default when an operation requiring
# authentication is performed.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
except ImportError:
pass
DEBUG_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with debug output enabled.
*** Be aware that debug output includes authentication credentials.
*** Make sure to remove the value of the Authorization header for
*** each HTTP request printed to the console prior to posting to
*** a public medium such as a forum post or Stack Overflow.
***************************** WARNING *****************************
""".lstrip()
TRACE_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with trace output enabled.
*** Be aware that trace output includes authentication credentials
*** and may include the contents of any files accessed during the trace.
***************************** WARNING *****************************
""".lstrip()
HTTP_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with the "https_validate_certificates" config
*** variable set to False. This option should always be set to True in
*** production environments to protect against man-in-the-middle attacks,
*** and leaking of user data.
***************************** WARNING *****************************
""".lstrip()
debug_level = 0
test_exception_traces = False
# pylint: disable=unused-argument
def _CleanupSignalHandler(signal_num, cur_stack_frame):
"""Cleans up if process is killed with SIGINT, SIGQUIT or SIGTERM.
Note that this method is called after main() has been called, so it has
access to all the modules imported at the start of main().
Args:
signal_num: Unused, but required in the method signature.
cur_stack_frame: Unused, but required in the method signature.
"""
_Cleanup()
if (gslib.utils.parallelism_framework_util.
CheckMultiprocessingAvailableAndInit().is_available):
gslib.command.TeardownMultiprocessingProcesses()
def _Cleanup():
for fname in boto_util.GetCleanupFiles():
try:
os.unlink(fname)
except: # pylint: disable=bare-except
pass
def _OutputAndExit(message, exception=None):
"""Outputs message to stderr and exits gsutil with code 1.
This function should only be called in single-process, single-threaded mode.
Args:
message: Message to print to stderr.
exception: The exception that caused gsutil to fail.
"""
if debug_level >= constants.DEBUGLEVEL_DUMP_REQUESTS or test_exception_traces:
stack_trace = traceback.format_exc()
err = ('DEBUG: Exception stack trace:\n %s\n%s\n' %
(re.sub('\\n', '\n ', stack_trace), message))
else:
err = '%s\n' % message
try:
text_util.print_to_fd(err, end='', file=sys.stderr)
except UnicodeDecodeError:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
if exception:
metrics.LogFatalError(exception)
sys.exit(1)
def _OutputUsageAndExit(command_runner):
command_runner.RunNamedCommand('help')
sys.exit(1)
class GsutilFormatter(logging.Formatter):
"""A logging.Formatter that supports logging microseconds (%f)."""
def formatTime(self, record, datefmt=None):
if datefmt:
return datetime.datetime.fromtimestamp(record.created).strftime(datefmt)
# Use default implementation if datefmt is not specified.
return super(GsutilFormatter, self).formatTime(record, datefmt=datefmt)
def _ConfigureRootLogger(level=logging.INFO):
"""Similar to logging.basicConfig() except it always adds a handler."""
log_format = '%(levelname)s %(asctime)s %(filename)s] %(message)s'
date_format = '%m%d %H:%M:%S.%f'
formatter = GsutilFormatter(fmt=log_format, datefmt=date_format)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(level)
def main():
InitializeSignalHandling()
# Any modules used in initializing multiprocessing variables must be
# imported after importing gslib.__main__.
# pylint: disable=redefined-outer-name,g-import-not-at-top
import gslib.boto_translation
import gslib.command
import gslib.utils.parallelism_framework_util
# pylint: disable=unused-variable
from gcs_oauth2_boto_plugin import oauth2_client
from apitools.base.py import credentials_lib
# pylint: enable=unused-variable
if (gslib.utils.parallelism_framework_util.
CheckMultiprocessingAvailableAndInit().is_available):
# These setup methods must be called, and, on Windows, they can only be
# called from within an "if __name__ == '__main__':" block.
gslib.command.InitializeMultiprocessingVariables()
gslib.boto_translation.InitializeMultiprocessingVariables()
else:
gslib.command.InitializeThreadingVariables()
# This needs to be done after InitializeMultiprocessingVariables(), since
# otherwise we can't call CreateLock.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
gsutil_client_id, gsutil_client_secret = (
system_util.GetGsutilClientIdAndSecret())
gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
gsutil_client_id, gsutil_client_secret)
gcs_oauth2_boto_plugin.oauth2_helper.SetLock(
gslib.utils.parallelism_framework_util.CreateLock())
credentials_lib.SetCredentialsCacheFileLock(
gslib.utils.parallelism_framework_util.CreateLock())
except ImportError:
pass
global debug_level
global test_exception_traces
supported, err = check_python_version_support()
if not supported:
raise CommandException(err)
sys.exit(1)
boto_util.MonkeyPatchBoto()
system_util.MonkeyPatchHttp()
# In gsutil 4.0 and beyond, we don't use the boto library for the JSON
# API. However, we still store gsutil configuration data in the .boto
# config file for compatibility with previous versions and user convenience.
# Many users have a .boto configuration file from previous versions, and it
# is useful to have all of the configuration for gsutil stored in one place.
command_runner = CommandRunner()
if not boto_util.BOTO_IS_SECURE:
raise CommandException('\n'.join(
textwrap.wrap(
'Your boto configuration has is_secure = False. Gsutil cannot be '
'run this way, for security reasons.')))
headers = {}
parallel_operations = False
quiet = False
version = False
debug_level = 0
trace_token = None
perf_trace_token = None
test_exception_traces = False
user_project = None
# If user enters no commands just print the usage info.
if len(sys.argv) == 1:
sys.argv.append('help')
# Change the default of the 'https_validate_certificates' boto option to
# True (it is currently False in boto).
if not boto.config.has_option('Boto', 'https_validate_certificates'):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.setbool('Boto', 'https_validate_certificates', True)
for signal_num in GetCaughtSignals():
RegisterSignalHandler(signal_num, _CleanupSignalHandler)
try:
try:
opts, args = getopt.getopt(sys.argv[1:], 'dDvo:?h:i:u:mq', [
'debug', 'detailedDebug', 'version', 'option', 'help', 'header',
'impersonate-service-account=', 'multithreaded', 'quiet',
'testexceptiontraces', 'trace-token=', 'perf-trace-token='
])
except getopt.GetoptError as e:
_HandleCommandException(CommandException(e.msg))
for o, a in opts:
if o in ('-d', '--debug'):
# Also causes boto to include httplib header output.
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-D', '--detailedDebug'):
# We use debug level 3 to ask gsutil code to output more detailed
# debug output. This is a bit of a hack since it overloads the same
# flag that was originally implemented for boto use. And we use -DD
# to ask for really detailed debugging (i.e., including HTTP payload).
if debug_level == constants.DEBUGLEVEL_DUMP_REQUESTS:
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
else:
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-?', '--help'):
_OutputUsageAndExit(command_runner)
elif o in ('-h', '--header'):
(hdr_name, _, hdr_val) = a.partition(':')
if not hdr_name:
_OutputUsageAndExit(command_runner)
headers[hdr_name.lower()] = hdr_val
elif o in ('-m', '--multithreaded'):
parallel_operations = True
elif o in ('-q', '--quiet'):
quiet = True
elif o == '-u':
user_project = a
elif o in ('-v', '--version'):
version = True
elif o in ('-i', '--impersonate-service-account'):
constants.IMPERSONATE_SERVICE_ACCOUNT = a
elif o == '--perf-trace-token':
perf_trace_token = a
elif o == '--trace-token':
trace_token = a
elif o == '--testexceptiontraces': # Hidden flag for integration tests.
test_exception_traces = True
# Avoid printing extra warnings to stderr regarding long retries by
# setting the threshold very high.
constants.LONG_RETRY_WARN_SEC = 3600
elif o in ('-o', '--option'):
(opt_section_name, _, opt_value) = a.partition('=')
if not opt_section_name:
_OutputUsageAndExit(command_runner)
(opt_section, _, opt_name) = opt_section_name.partition(':')
if not opt_section or not opt_name:
_OutputUsageAndExit(command_runner)
if not boto.config.has_section(opt_section):
boto.config.add_section(opt_section)
boto.config.set(opt_section, opt_name, opt_value)
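        # Illustrative form of the -o flag (the option name below is just an
        # example of the "Section:option=value" syntax parsed above):
        #   gsutil -o "GSUtil:parallel_process_count=1" cp ...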
# Now that any Boto option overrides (via `-o` args) have been parsed,
# perform initialization that depends on those options.
boto_util.configured_certs_file = (boto_util.ConfigureCertsFile())
metrics.LogCommandParams(global_opts=opts)
httplib2.debuglevel = debug_level
if trace_token:
sys.stderr.write(TRACE_WARNING)
if debug_level >= constants.DEBUGLEVEL_DUMP_REQUESTS:
sys.stderr.write(DEBUG_WARNING)
_ConfigureRootLogger(level=logging.DEBUG)
command_runner.RunNamedCommand('ver', ['-l'])
config_items = []
for config_section in ('Boto', 'GSUtil'):
try:
config_items.extend(boto.config.items(config_section))
except configparser.NoSectionError:
pass
for i in range(len(config_items)):
config_item_key = config_items[i][0]
if config_item_key in CONFIG_KEYS_TO_REDACT:
config_items[i] = (config_item_key, 'REDACTED')
sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
sys.stderr.write('config_file_list: %s\n' %
boto_util.GetFriendlyConfigFilePaths())
sys.stderr.write('config: %s\n' % str(config_items))
else: # Non-debug log level.
root_logger_level = logging.WARNING if quiet else logging.INFO
# oauth2client uses INFO and WARNING logging in places that would better
# correspond to gsutil's debug logging (e.g., when refreshing
# access tokens), so we bump the threshold one level higher where
# appropriate. These log levels work for regular- and quiet-level logging.
oa2c_logger_level = logging.WARNING
oa2c_multiprocess_file_storage_logger_level = logging.ERROR
_ConfigureRootLogger(level=root_logger_level)
oauth2client.client.logger.setLevel(oa2c_logger_level)
oauth2client.contrib.multiprocess_file_storage.logger.setLevel(
oa2c_multiprocess_file_storage_logger_level)
# pylint: disable=protected-access
oauth2client.transport._LOGGER.setLevel(oa2c_logger_level)
reauth_creds._LOGGER.setLevel(oa2c_logger_level)
# pylint: enable=protected-access
# TODO(reauth): Fix once reauth pins to pyu2f version newer than 0.1.3.
# Fixes pyu2f v0.1.3 bug.
import six # pylint: disable=g-import-not-at-top
six.input = six.moves.input
if not boto_util.CERTIFICATE_VALIDATION_ENABLED:
sys.stderr.write(HTTP_WARNING)
if version:
command_name = 'version'
elif not args:
command_name = 'help'
else:
command_name = args[0]
_CheckAndWarnForProxyDifferences()
# Both 1 and 2 are valid _ARGCOMPLETE values; this var tells argcomplete at
# what argv[] index the command to match starts. We want it to start at the
# value for the path to gsutil, so:
# $ gsutil <command> # Should be the 1st argument, so '1'
# $ python gsutil <command> # Should be the 2nd argument, so '2'
# Both are valid; most users invoke gsutil in the first style, but our
# integration and prerelease tests invoke it in the second style, as we need
# to specify the Python interpreter used to run gsutil.
if os.environ.get('_ARGCOMPLETE', '0') in ('1', '2'):
return _PerformTabCompletion(command_runner)
return _RunNamedCommandAndHandleExceptions(
command_runner,
command_name,
args=args[1:],
headers=headers,
debug_level=debug_level,
trace_token=trace_token,
parallel_operations=parallel_operations,
perf_trace_token=perf_trace_token,
user_project=user_project)
finally:
_Cleanup()
def _CheckAndWarnForProxyDifferences():
# If there are both boto config and environment variable config present for
# proxies, unset the environment variable and warn if it differs.
boto_port = boto.config.getint('Boto', 'proxy_port', 0)
if boto.config.get('Boto', 'proxy', None) or boto_port:
for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
if proxy_env_var in os.environ and os.environ[proxy_env_var]:
differing_values = []
proxy_info = boto_util.ProxyInfoFromEnvironmentVar(proxy_env_var)
if proxy_info.proxy_host != boto.config.get('Boto', 'proxy', None):
differing_values.append(
'Boto proxy host: "%s" differs from %s proxy host: "%s"' %
(boto.config.get('Boto', 'proxy',
None), proxy_env_var, proxy_info.proxy_host))
if (proxy_info.proxy_user != boto.config.get('Boto', 'proxy_user',
None)):
differing_values.append(
'Boto proxy user: "%s" differs from %s proxy user: "%s"' %
(boto.config.get('Boto', 'proxy_user',
None), proxy_env_var, proxy_info.proxy_user))
if (proxy_info.proxy_pass != boto.config.get('Boto', 'proxy_pass',
None)):
differing_values.append(
'Boto proxy password differs from %s proxy password' %
proxy_env_var)
# Only compare ports if at least one is present, since the
# boto logic for selecting default ports has not yet executed.
if ((proxy_info.proxy_port or boto_port) and
proxy_info.proxy_port != boto_port):
differing_values.append(
'Boto proxy port: "%s" differs from %s proxy port: "%s"' %
(boto_port, proxy_env_var, proxy_info.proxy_port))
if differing_values:
sys.stderr.write('\n'.join(
textwrap.wrap(
'WARNING: Proxy configuration is present in both the %s '
'environment variable and boto configuration, but '
'configuration differs. boto configuration proxy values will '
'be used. Differences detected:' % proxy_env_var)))
sys.stderr.write('\n%s\n' % '\n'.join(differing_values))
# Regardless of whether the proxy configuration values matched,
# delete the environment variable so as not to confuse boto.
del os.environ[proxy_env_var]
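  # Worked example (added for illustration; the addresses are hypothetical): with
  # a boto config containing
  #   [Boto]
  #   proxy = 10.0.0.1
  #   proxy_port = 3128
  # and the environment variable https_proxy=http://10.0.0.2:8080, the host and
  # port comparisons above both differ, so the warning is emitted, the boto
  # values win, and https_proxy is deleted from os.environ for this process.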
def _HandleUnknownFailure(e):
# Called if we fall through all known/handled exceptions.
raise
_OutputAndExit(message='Failure: %s.' % e, exception=e)
def _HandleCommandException(e):
if e.informational:
_OutputAndExit(message=e.reason, exception=e)
else:
_OutputAndExit(message='CommandException: %s' % e.reason, exception=e)
# pylint: disable=unused-argument
def _HandleControlC(signal_num, cur_stack_frame):
"""Called when user hits ^C.
This function prints a brief message instead of the normal Python stack trace
(unless -D option is used).
Args:
signal_num: Signal that was caught.
cur_stack_frame: Unused.
"""
if debug_level >= 2:
stack_trace = ''.join(traceback.format_list(traceback.extract_stack()))
_OutputAndExit('DEBUG: Caught CTRL-C (signal %d) - Exception stack trace:\n'
' %s' %
(signal_num, re.sub('\\n', '\n ', stack_trace)),
exception=ControlCException())
else:
_OutputAndExit('Caught CTRL-C (signal %d) - exiting' % signal_num,
exception=ControlCException())
def _HandleSigQuit(signal_num, cur_stack_frame):
r"""Called when user hits ^\, so we can force breakpoint a running gsutil."""
import pdb # pylint: disable=g-import-not-at-top
pdb.set_trace()
def _ConstructAccountProblemHelp(reason):
"""Constructs a help string for an access control error.
Args:
reason: e.reason string from caught exception.
Returns:
    Constructed help text.
"""
default_project_id = boto.config.get_value('GSUtil', 'default_project_id')
# pylint: disable=line-too-long, g-inconsistent-quotes
acct_help = (
"Your request resulted in an AccountProblem (403) error. Usually this "
"happens if you attempt to create a bucket without first having "
"enabled billing for the project you are using. Please ensure billing is "
"enabled for your project by following the instructions at "
"`Google Cloud Platform Console<https://support.google.com/cloud/answer/6158867>`. "
)
if default_project_id:
acct_help += (
"In the project overview, ensure that the Project Number listed for "
"your project matches the project ID (%s) from your boto config file. "
% default_project_id)
acct_help += (
"If the above doesn't resolve your AccountProblem, please send mail to "
"[email protected] requesting assistance, noting the exact command you "
"ran, the fact that you received a 403 AccountProblem error, and your "
"project ID. Please do not post your project ID on StackOverflow. "
"Note: It's possible to use Google Cloud Storage without enabling "
"billing if you're only listing or reading objects for which you're "
"authorized, or if you're uploading objects to a bucket billed to a "
"project that has billing enabled. But if you're attempting to create "
"buckets or upload objects to a bucket owned by your own project, you "
"must first enable billing for that project.")
return acct_help
def _CheckAndHandleCredentialException(e, args):
# Provide detail to users who have no boto config file (who might previously
# have been using gsutil only for accessing publicly readable buckets and
# objects).
if (not boto_util.HasConfiguredCredentials() and not boto.config.get_value(
'Tests', 'bypass_anonymous_access_warning', False)):
# The check above allows tests to assert that we get a particular,
# expected failure, rather than always encountering this error message
# when there are no configured credentials. This allows tests to
# simulate a second user without permissions, without actually requiring
# two separate configured users.
if system_util.InvokedViaCloudSdk():
message = '\n'.join(
textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gcloud auth login" command to '
'configure gsutil to use these credentials.'))
else:
message = '\n'.join(
textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gsutil config" command to configure '
'gsutil to use these credentials.'))
_OutputAndExit(message=message, exception=e)
elif (e.reason and
(e.reason == 'AccountProblem' or e.reason == 'Account disabled.' or
'account for the specified project has been disabled' in e.reason) and
','.join(args).find('gs://') != -1):
_OutputAndExit('\n'.join(
textwrap.wrap(_ConstructAccountProblemHelp(e.reason))),
exception=e)
def _RunNamedCommandAndHandleExceptions(command_runner,
command_name,
args=None,
headers=None,
debug_level=0,
trace_token=None,
parallel_operations=False,
perf_trace_token=None,
user_project=None):
"""Runs the command and handles common exceptions."""
# Note that this method is run at the end of main() and thus has access to
# all of the modules imported there.
# pylint: disable=g-import-not-at-top
try:
# Catch ^C so we can print a brief message instead of the normal Python
# stack trace. Register as a final signal handler because this handler kills
# the main gsutil process (so it must run last).
RegisterSignalHandler(signal.SIGINT, _HandleControlC, is_final_handler=True)
# Catch ^\ so we can force a breakpoint in a running gsutil.
if not system_util.IS_WINDOWS:
RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit)
return command_runner.RunNamedCommand(command_name,
args,
headers,
debug_level,
trace_token,
parallel_operations,
perf_trace_token=perf_trace_token,
collect_analytics=True,
user_project=user_project)
except AttributeError as e:
if str(e).find('secret_access_key') != -1:
_OutputAndExit(
'Missing credentials for the given URI(s). Does your '
'boto config file contain all needed credentials?',
exception=e)
else:
_OutputAndExit(message=str(e), exception=e)
except CommandException as e:
_HandleCommandException(e)
except getopt.GetoptError as e:
_HandleCommandException(CommandException(e.msg))
except boto.exception.InvalidUriError as e:
_OutputAndExit(message='InvalidUriError: %s.' % e.message, exception=e)
except gslib.exception.InvalidUrlError as e:
_OutputAndExit(message='InvalidUrlError: %s.' % e.message, exception=e)
  except boto.auth_handler.NotReadyToAuthenticate as e:
    _OutputAndExit(message='NotReadyToAuthenticate', exception=e)
except OSError as e:
# In Python 3, IOError (next except) is an alias for OSError
# Sooo... we need the same logic here
if (e.errno == errno.EPIPE or
(system_util.IS_WINDOWS and e.errno == errno.EINVAL) and
not system_util.IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
_OutputAndExit(message='OSError: %s.' % e.strerror, exception=e)
except IOError as e:
if (e.errno == errno.EPIPE or
(system_util.IS_WINDOWS and e.errno == errno.EINVAL) and
not system_util.IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
raise
except wildcard_iterator.WildcardException as e:
_OutputAndExit(message=e.reason, exception=e)
except ProjectIdException as e:
_OutputAndExit(
'You are attempting to perform an operation that requires a '
'project id, with none configured. Please re-run '
'gsutil config and make sure to follow the instructions for '
'finding and entering your default project id.',
exception=e)
except BadRequestException as e:
if e.reason == 'MissingSecurityHeader':
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except AccessDeniedException as e:
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except ArgumentException as e:
_OutputAndExit(message=e, exception=e)
except ServiceException as e:
_OutputAndExit(message=e, exception=e)
except oauth2client.client.HttpAccessTokenRefreshError as e:
if system_util.InvokedViaCloudSdk():
_OutputAndExit(
'Your credentials are invalid. '
'Please run\n$ gcloud auth login',
exception=e)
else:
_OutputAndExit(
'Your credentials are invalid. For more help, see '
'"gsutil help creds", or re-run the gsutil config command (see '
'"gsutil help config").',
exception=e)
except apitools_exceptions.HttpError as e:
# These should usually be retried by the underlying implementation or
# wrapped by CloudApi ServiceExceptions, but if we do get them,
# print something useful.
_OutputAndExit('HttpError: %s, %s' %
(getattr(e.response, 'status', ''), e.content or ''),
exception=e)
except socket.error as e:
if e.args[0] == errno.EPIPE:
# Retrying with a smaller file (per suggestion below) works because
# the library code send loop (in boto/s3/key.py) can get through the
# entire file and then request the HTTP response before the socket
# gets closed and the response lost.
_OutputAndExit(
'Got a "Broken pipe" error. This can happen to clients using Python '
'2.x, when the server sends an error response and then closes the '
'socket (see http://bugs.python.org/issue5542). If you are trying to '
'upload a large object you might retry with a small (say 200k) '
'object, and see if you get a more specific error code.',
exception=e)
    elif e.args[0] == errno.ECONNRESET and 's3://' in ' '.join(args):
_OutputAndExit('\n'.join(
textwrap.wrap(
'Got a "Connection reset by peer" error. One way this can happen is '
'when copying data to/from an S3 regional bucket. If you are using a '
'regional S3 bucket you could try re-running this command using the '
'regional S3 endpoint, for example '
's3://s3-<region>.amazonaws.com/your-bucket. For details about this '
'problem see https://github.com/boto/boto/issues/2207')),
exception=e)
else:
_HandleUnknownFailure(e)
except oauth2client.client.FlowExchangeError as e:
_OutputAndExit('\n%s\n\n' % '\n'.join(
textwrap.wrap(
'Failed to retrieve valid credentials (%s). Make sure you selected and '
'pasted the ENTIRE authorization code (including any numeric prefix '
"e.g. '4/')." % e)),
exception=e)
except Exception as e: # pylint: disable=broad-except
config_paths = ', '.join(boto_util.GetFriendlyConfigFilePaths())
# Check for two types of errors related to service accounts. These errors
# appear to be the same except for their messages, but they are caused by
# different problems and both have unhelpful error messages. Moreover,
# the error type belongs to PyOpenSSL, which is not necessarily installed.
if 'mac verify failure' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file_password field in your config file(s),'
'\n%s, is correct.' % config_paths,
exception=e)
elif 'asn1 encoding routines' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file field in your config file(s),\n%s, is correct.' %
config_paths,
exception=e)
_HandleUnknownFailure(e)
def _PerformTabCompletion(command_runner):
"""Performs gsutil-specific tab completion for the shell."""
# argparse and argcomplete are bundled with the Google Cloud SDK.
# When gsutil is invoked from the Google Cloud SDK, both should be available.
try:
import argcomplete
import argparse
except ImportError as e:
_OutputAndExit('A library required for performing tab completion was'
' not found.\nCause: %s' % e,
exception=e)
parser = argparse.ArgumentParser(add_help=False)
command_runner.ConfigureCommandArgumentParsers(parser)
argcomplete.autocomplete(parser, exit_method=sys.exit)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 1,954,081,867,482,379,000 | 41.653061 | 90 | 0.6625 | false |
hovsepm/AutoRest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/usage_name.py | 16 | 1055 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageName(Model):
"""The Usage Names.
:param value: Gets a string describing the resource name.
:type value: str
:param localized_value: Gets a localized string describing the resource
name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self, value=None, localized_value=None):
self.value = value
self.localized_value = localized_value
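# Example (added for illustration only; the values shown are hypothetical):
#
#     usage_name = UsageName(value='StorageAccounts',
#                            localized_value='Storage Accounts')
#
# Both attributes default to None and simply mirror the constructor arguments.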
| mit | 6,670,789,685,731,214,000 | 31.96875 | 76 | 0.577251 | false |
partofthething/home-assistant | homeassistant/components/freebox/device_tracker.py | 15 | 4452 | """Support for Freebox devices (Freebox v6 and Freebox mini 4K)."""
from datetime import datetime
from typing import Dict
from homeassistant.components.device_tracker import SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from .const import DEFAULT_DEVICE_NAME, DEVICE_ICONS, DOMAIN
from .router import FreeboxRouter
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up device tracker for Freebox component."""
router = hass.data[DOMAIN][entry.unique_id]
tracked = set()
@callback
def update_router():
"""Update the values of the router."""
add_entities(router, async_add_entities, tracked)
router.listeners.append(
async_dispatcher_connect(hass, router.signal_device_new, update_router)
)
update_router()
@callback
def add_entities(router, async_add_entities, tracked):
"""Add new tracker entities from the router."""
new_tracked = []
for mac, device in router.devices.items():
if mac in tracked:
continue
new_tracked.append(FreeboxDevice(router, device))
tracked.add(mac)
if new_tracked:
async_add_entities(new_tracked, True)
class FreeboxDevice(ScannerEntity):
"""Representation of a Freebox device."""
def __init__(self, router: FreeboxRouter, device: Dict[str, any]) -> None:
"""Initialize a Freebox device."""
self._router = router
self._name = device["primary_name"].strip() or DEFAULT_DEVICE_NAME
self._mac = device["l2ident"]["id"]
self._manufacturer = device["vendor_name"]
self._icon = icon_for_freebox_device(device)
self._active = False
self._attrs = {}
@callback
def async_update_state(self) -> None:
"""Update the Freebox device."""
device = self._router.devices[self._mac]
self._active = device["active"]
if device.get("attrs") is None:
# device
self._attrs = {
"last_time_reachable": datetime.fromtimestamp(
device["last_time_reachable"]
),
"last_time_activity": datetime.fromtimestamp(device["last_activity"]),
}
else:
# router
self._attrs = device["attrs"]
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._mac
@property
def name(self) -> str:
"""Return the name."""
return self._name
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self._active
@property
def source_type(self) -> str:
"""Return the source type."""
return SOURCE_TYPE_ROUTER
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@property
def device_state_attributes(self) -> Dict[str, any]:
"""Return the attributes."""
return self._attrs
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return {
"connections": {(CONNECTION_NETWORK_MAC, self._mac)},
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": self._manufacturer,
}
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@callback
def async_on_demand_update(self):
"""Update state."""
self.async_update_state()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register state update callback."""
self.async_update_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
self._router.signal_device_update,
self.async_on_demand_update,
)
)
def icon_for_freebox_device(device) -> str:
"""Return a device icon from its type."""
return DEVICE_ICONS.get(device["host_type"], "mdi:help-network")
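# Flow sketch (added commentary, not part of the integration): FreeboxRouter
# polls the Freebox API elsewhere and fires signal_device_new /
# signal_device_update on the dispatcher. update_router() above wraps each newly
# seen MAC in a FreeboxDevice, and async_on_demand_update() refreshes existing
# entities without polling (should_poll is False). For example, a device dict
# shaped roughly like
#   {"primary_name": "laptop", "l2ident": {"id": "aa:bb:cc:dd:ee:ff"},
#    "vendor_name": "Acme", "active": True, "host_type": "laptop", ...}
# becomes one ScannerEntity named "laptop".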
| mit | 3,867,445,905,360,972,300 | 29.285714 | 86 | 0.615229 | false |
Lilykos/invenio | invenio/ext/elasticsearch/tasks.py | 13 | 1428 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""ES function to submit Celery tasks."""
from invenio.celery import celery
@celery.task
def index_records(sender, recid):
"""Celery function to index records."""
from flask import current_app
current_app.extensions.get("elasticsearch").index_records([recid])
#TODO: get_text seems async should be replaced by a signal?
import time
time.sleep(1)
current_app.extensions.get("elasticsearch").index_documents([recid])
@celery.task
def index_collections(sender, collections):
"""Celery function to index collections."""
from flask import current_app
current_app.extensions.get("elasticsearch").index_collections()
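# Dispatch sketch (added; the signal names and record ids are hypothetical):
# being ordinary Celery tasks, these are normally queued asynchronously, e.g.
#   index_records.delay('record-inserted', 42)
#   index_collections.delay('collection-updated', ['Articles'])
# rather than called inline.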
| gpl-2.0 | -8,214,721,766,243,339,000 | 34.7 | 74 | 0.737395 | false |
jaywreddy/django | django/contrib/gis/db/backends/spatialite/operations.py | 257 | 11441 | """
SQL functions reference lists:
http://www.gaia-gis.it/spatialite-2.4.0/spatialite-sql-2.4.html
http://www.gaia-gis.it/spatialite-3.0.0-BETA/spatialite-sql-3.0.0.html
http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
import sys
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.functional import cached_property
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = SpatiaLiteAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'Area'
centroid = 'Centroid'
collect = 'Collect'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
extent = 'Extent'
intersection = 'Intersection'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
gis_operators = {
'equals': SpatialOperator(func='Equals'),
'disjoint': SpatialOperator(func='Disjoint'),
'touches': SpatialOperator(func='Touches'),
'crosses': SpatialOperator(func='Crosses'),
'within': SpatialOperator(func='Within'),
'overlaps': SpatialOperator(func='Overlaps'),
'contains': SpatialOperator(func='Contains'),
'intersects': SpatialOperator(func='Intersects'),
'relate': SpatialOperator(func='Relate'),
# Returns true if B's bounding box completely contains A's bounding box.
'contained': SpatialOperator(func='MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains': SpatialOperator(func='MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps': SpatialOperator(func='MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as': SpatialOperator(func='Equals'),
'exact': SpatialOperator(func='Equals'),
'distance_gt': SpatialOperator(func='Distance', op='>'),
'distance_gte': SpatialOperator(func='Distance', op='>='),
'distance_lt': SpatialOperator(func='Distance', op='<'),
'distance_lte': SpatialOperator(func='Distance', op='<='),
}
@cached_property
def function_names(self):
return {
'Length': 'ST_Length',
'Reverse': 'ST_Reverse',
'Scale': 'ScaleCoords',
'Translate': 'ST_Translate' if self.spatial_version >= (3, 1, 0) else 'ShiftCoords',
'Union': 'ST_Union',
}
@cached_property
def unsupported_functions(self):
unsupported = {'BoundingCircle', 'ForceRHR', 'GeoHash', 'MemSize'}
if self.spatial_version < (3, 1, 0):
unsupported.add('SnapToGrid')
if self.spatial_version < (4, 0, 0):
unsupported.update({'Perimeter', 'Reverse'})
return unsupported
@cached_property
def spatial_version(self):
"""Determine the version of the SpatiaLite library."""
try:
version = self.spatialite_version_tuple()[1:]
except Exception as msg:
new_msg = (
'Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?') % (self.connection.settings_dict['NAME'], msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
if version < (2, 4, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
'2.4.0 and above')
return version
@property
def _version_greater_2_4_0_rc4(self):
if self.spatial_version >= (2, 4, 1):
return True
else:
# Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
# RC2 (shipped in popular Debian/Ubuntu packages) and RC4
# report version as '2.4.0', so we fall back to feature detection
try:
self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
except DatabaseError:
return False
return True
@cached_property
def disallowed_aggregates(self):
disallowed = (aggregates.Extent3D, aggregates.MakeLine)
if self.spatial_version < (3, 0, 0):
disallowed += (aggregates.Collect, aggregates.Extent)
return disallowed
@cached_property
def gml(self):
return 'AsGML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def kml(self):
return 'AsKML' if self._version_greater_2_4_0_rc4 else None
@cached_property
def geojson(self):
return 'AsGeoJSON' if self.spatial_version >= (3, 0, 0) else None
def convert_extent(self, box, srid):
"""
Convert the polygon data received from Spatialite to min/max values.
"""
if box is None:
return None
shell = Geometry(box, srid).shell
xmin, ymin = shell[0][:2]
xmax, ymax = shell[2][:2]
return (xmin, ymin, xmax, ymax)
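        # Worked example (added for illustration): for the bounding-box polygon
        # 'POLYGON((0 0, 4 0, 4 5, 0 5, 0 0))' the first and third shell
        # vertices yield (xmin, ymin, xmax, ymax) == (0.0, 0.0, 4.0, 5.0).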
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
        Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system. '
'Distance objects; use a numeric value of your '
'distance in degrees instead.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
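    # Illustration (added; the exact SQL depends on the SpatiaLite version): a
    # literal geometry with SRID 4326 bound to a field declared with SRID 3857
    # is rendered roughly as Transform(GeomFromText(%s,4326), 3857), while a
    # matching SRID collapses to GeomFromText(%s,3857).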
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
Any error occurring in this method should be handled by the caller.
"""
cursor = self.connection._cursor()
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
version = self.spatialite_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
return SpatialiteGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
return SpatialiteSpatialRefSys
def get_db_converters(self, expression):
converters = super(SpatiaLiteOperations, self).get_db_converters(expression)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
| bsd-3-clause | -1,311,533,496,000,603,400 | 37.783051 | 100 | 0.617516 | false |
amenonsen/ansible | lib/ansible/modules/cloud/vmware/_vmware_host_config_facts.py | 21 | 3727 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_config_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vmware_host_config_info) instead.
short_description: Gathers facts about an ESXi host's advanced configuration information
description:
- This module can be used to gather facts about an ESXi host's advanced configuration information when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster from which the ESXi host belong to.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather facts about all ESXi Host in given Cluster
vmware_host_config_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
delegate_to: localhost
- name: Gather facts about ESXi Host
vmware_host_config_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
'''
RETURN = r'''
hosts_facts:
description:
- dict with hostname as key and dict with host config facts
returned: always
type: dict
sample: {
"10.76.33.226": {
"Annotations.WelcomeMessage": "",
"BufferCache.FlushInterval": 30000,
"BufferCache.HardMaxDirty": 95,
"BufferCache.PerFileHardMaxDirty": 50,
"BufferCache.SoftMaxDirty": 15,
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VmwareConfigFactsManager(PyVmomi):
def __init__(self, module):
super(VmwareConfigFactsManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
def gather_host_facts(self):
hosts_facts = {}
for host in self.hosts:
host_facts = {}
for option in host.configManager.advancedOption.QueryOptions():
host_facts[option.key] = option.value
hosts_facts[host.name] = host_facts
return hosts_facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True
)
vmware_host_config = VmwareConfigFactsManager(module)
module.exit_json(changed=False, hosts_facts=vmware_host_config.gather_host_facts())
if __name__ == "__main__":
main()
| gpl-3.0 | 122,118,836,678,814,020 | 29.056452 | 141 | 0.661926 | false |
dvliman/jaikuengine | .google_appengine/lib/django-1.3/django/contrib/messages/storage/fallback.py | 627 | 2171 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
self.storages = [storage_class(*args, **kwargs)
for storage_class in self.storage_classes]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
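# Usage note (added as a sketch; this is the standard Django configuration
# path): the fallback behaviour above is selected in settings with
#   MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Messages that fit within the cookie size limit stay in CookieStorage; whatever
# CookieStorage._store() cannot hold is returned and handed to SessionStorage on
# the next pass of the loop in _store().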
| apache-2.0 | 6,933,060,659,648,638,000 | 39.203704 | 78 | 0.60433 | false |
mzbenami/pyeapi | pyeapi/api/varp.py | 1 | 7989 | #
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Module for managing the VARP configuration in EOS
This module provides an API for configuring VARP resources using
EOS and eAPI.
Arguments:
name (string): The interface name the configuration is in reference
to. The interface name is the full interface identifier
address (string): The interface IP address in the form of
address/len.
mtu (integer): The interface MTU value. The MTU value accepts
integers in the range of 68 to 65535 bytes
"""
import re
from pyeapi.api import EntityCollection
class Varp(EntityCollection):
def __init__(self, *args, **kwargs):
super(Varp, self).__init__(*args, **kwargs)
self._interfaces = None
@property
def interfaces(self):
if self._interfaces is not None:
return self._interfaces
self._interfaces = VarpInterfaces(self.node)
return self._interfaces
def get(self):
"""Returns the current VARP configuration
The Varp resource returns the following:
* mac_address (str): The virtual-router mac address
* interfaces (dict): A list of the interfaces that have a
virtual-router address configured.
Return:
A Python dictionary object of key/value pairs that represents
the current configuration of the node. If the specified
interface does not exist then None is returned::
{
"mac_address": "aa:bb:cc:dd:ee:ff",
"interfaces": {
"Vlan100": {
"addresses": [ "1.1.1.1", "2.2.2.2"]
},
"Vlan200": [...]
}
}
"""
resource = dict()
resource.update(self._parse_mac_address())
resource.update(self._parse_interfaces())
return resource
def _parse_mac_address(self):
mac_address_re = re.compile(r'^ip\svirtual-router\smac-address\s'
r'((?:[a-f0-9]{2}:){5}[a-f0-9]{2})$', re.M)
mac = mac_address_re.search(self.config)
mac = mac.group(1) if mac else None
return dict(mac_address=mac)
def _parse_interfaces(self):
interfaces = VarpInterfaces(self.node).getall()
return dict(interfaces=interfaces)
def set_mac_address(self, mac_address=None, default=False, disable=False):
""" Sets the virtual-router mac address
This method will set the switch virtual-router mac address. If a
virtual-router mac address already exists it will be overwritten.
Args:
mac_address (string): The mac address that will be assigned as
the virtual-router mac address. This should be in the format,
aa:bb:cc:dd:ee:ff.
default (bool): Sets the virtual-router mac address to the system
default (which is to remove the configuration line).
            disable (bool): Negates the virtual-router mac address using
the system no configuration command
Returns:
True if the set operation succeeds otherwise False.
"""
if not default and not disable:
if mac_address is not None:
# Check to see if mac_address matches expected format
if not re.match(r'(?:[a-f0-9]{2}:){5}[a-f0-9]{2}', mac_address):
raise ValueError('mac_address must be formatted like:'
'aa:bb:cc:dd:ee:ff')
else:
raise ValueError('mac_address must be a properly formatted '
'address string')
commands = self.command_builder('ip virtual-router mac-address',
value=mac_address, default=default,
disable=disable)
return self.configure(commands)
class VarpInterfaces(EntityCollection):
"""The VarpInterfaces class helps manage interfaces with
virtual-router configuration.
"""
def get(self, name):
interface_re = r'interface\s%s' % name
config = self.get_block(interface_re)
if not config:
return None
resource = dict(addresses=dict())
resource.update(self._parse_virtual_addresses(config))
return resource
def getall(self):
resources = dict()
interfaces_re = re.compile(r'^interface\s(Vlan\d+)$', re.M)
for name in interfaces_re.findall(self.config):
interface_detail = self.get(name)
if interface_detail:
resources[name] = interface_detail
return resources
def set_addresses(self, name, addresses=None, default=False, disable=False):
commands = list()
commands.append('interface %s' % name)
if default:
commands.append('default ip virtual-router address')
elif disable:
commands.append('no ip virtual-router address')
elif addresses is not None:
try:
current_addresses = self.get(name)['addresses']
except:
current_addresses = []
# remove virtual-router addresses not present in addresses list
for entry in set(current_addresses).difference(addresses):
commands.append('no ip virtual-router address %s' % entry)
# add new set virtual-router addresses that werent present
for entry in set(addresses).difference(current_addresses):
commands.append('ip virtual-router address %s' % entry)
else:
commands.append('no ip virtual-router address')
return self.configure(commands) if commands else True
def _parse_virtual_addresses(self, config):
virt_ip_re = re.compile(r'^\s+ip\svirtual-router\saddress\s(\S+)$',
re.M)
return dict(addresses=virt_ip_re.findall(config))
def instance(node):
"""Returns an instance of Ipinterfaces
This method will create and return an instance of the Varp object
passing the value of node to the instance. This function is required
for the resource to be autoloaded by the Node object
Args:
node (Node): The node argument provides an instance of Node to
the Varp instance
"""
return Varp(node)
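# Example usage (a minimal sketch; the connection profile name "veos01" is
# hypothetical and assumes a reachable EOS node defined in eapi.conf):
#
#     import pyeapi
#     node = pyeapi.connect_to('veos01')
#     varp = node.api('varp')
#     varp.set_mac_address('aa:bb:cc:dd:ee:ff')
#     varp.interfaces.set_addresses('Vlan100', ['10.1.1.1', '10.1.1.2'])
#     print(varp.get())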
| bsd-3-clause | -437,381,868,914,811,700 | 37.408654 | 80 | 0.623357 | false |
andrewyoung1991/abjad | abjad/tools/lilypondparsertools/ContextSpeccedMusic.py | 2 | 2353 | # -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.lilypondparsertools.Music import Music
class ContextSpeccedMusic(Music):
r'''Abjad model of the LilyPond AST context-specced music node.
'''
### CLASS VARIABLES ###
__slots__ = (
#'context',
'context_name',
'music',
'optional_id',
'optional_context_mod',
)
### INITIALIZER ###
def __init__(
self,
context_name=None,
optional_id=None,
optional_context_mod=None,
music=None,
):
from abjad.tools import lilypondparsertools
context_name = context_name or ''
music = music or lilypondparsertools.SequentialMusic()
assert stringtools.is_string(context_name)
assert isinstance(music, Music)
self.context_name = context_name
self.optional_id = optional_id
self.optional_context_mod = optional_context_mod
self.music = music
### PUBLIC METHODS ###
def construct(self):
r'''Constructs context.
Returns context.
'''
        from abjad.tools import lilypondparsertools
        if self.context_name in self.known_contexts:
            context = self.known_contexts[self.context_name]([])
else:
message = 'context type not supported: {}.'
message = message.format(self.context_name)
raise Exception(message)
if self.optional_id is not None:
context.name = self.optional_id
if self.optional_context_mod is not None:
for x in self.optional_context_mod:
print(x)
# TODO: implement context modifications on contexts
pass
if isinstance(self.music, lilypondparsertools.SimultaneousMusic):
context.is_simultaneous = True
        context.extend(self.music.construct())
return context
### PUBLIC PROPERTIES ###
@property
def known_contexts(self):
r'''Known contexts.
Returns dictionary.
'''
        from abjad.tools import scoretools
        return {
'ChoirStaff': scoretools.StaffGroup,
'GrandStaff': scoretools.StaffGroup,
'PianoStaff': scoretools.StaffGroup,
'Score': scoretools.Score,
'Staff': scoretools.Staff,
'StaffGroup': scoretools.StaffGroup,
'Voice': scoretools.Voice,
}
| gpl-3.0 | 7,994,195,174,127,207,000 | 27.011905 | 73 | 0.58096 | false |
nalle/dyndd | dyndd/controller.py | 1 | 9149 | from twisted.internet import reactor, defer
from twisted.python import failure
from twisted.names import client, dns, error, server
import ConfigParser
import logging
import os
import time
import datetime
import socket
import signal
from twisted.internet.address import IPv4Address
import MySQLdb
class DynDDServerFactory(server.DNSServerFactory):
def handleQuery(self, message, protocol, address):
if protocol.transport.socket.type == socket.SOCK_STREAM:
self.peer_address = protocol.transport.getPeer()
elif protocol.transport.socket.type == socket.SOCK_DGRAM:
self.peer_address = IPv4Address('UDP', *address)
else:
print "Unexpected socket type %r" % protocol.transport.socket.type
for resolver in self.resolver.resolvers:
if hasattr(resolver, 'peer_address'):
resolver.peer_address = self.peer_address
        return server.DNSServerFactory.handleQuery(self, message,
                                                   protocol, address)
class DynDDController(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read("/etc/dyndd/dyndd.conf")
if self.config.get("global", "LogLevel").lower() == "fatal":
level = logging.FATAL
elif self.config.get("global", "LogLevel").lower() == "info":
level = logging.INFO
elif self.config.get("global", "LogLevel").lower() == "warn":
level = logging.WARN
elif self.config.get("global", "LogLevel").lower() == "error":
level = logging.ERROR
elif self.config.get("global", "LogLevel").lower() == "debug":
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s',
                            level=level,
filename=self.config.get("global",
"ApplicationLog"))
self.connect_mysql()
signal.signal(1, self._reload)
self._peer_address = None
@property
def peer_address(self):
return self._peer_address
@peer_address.setter
def peer_address(self, value):
self._peer_address = value
def _reload(self, signum, handler):
self.config.read("/etc/dyndd/dyndd.conf")
def connect_mysql(self):
try:
self.db = MySQLdb.connect(host=self.config.get("global",
"DBHost"),
user=self.config.get("global",
"DBUser"),
passwd=self.config.get("global",
"DBPass"),
db=self.config.get("global",
"DBSchema"))
self.cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
logging.info("MySQL connection successfully established to %s"
% self.config.get("global", "DBHost"))
except Exception, e:
logging.error("MySQL connection failed to %s, retrying (%s)"
% (self.config.get('global', 'DBHost'), str(e)))
self.connect_mysql()
return True
def ping_mysql(self):
try:
self.db.ping()
except Exception, e:
logging.error("Connection to MySQL %s lost, retrying (%s)"
% (self.config.get('global', 'DBHost'), (str(e))))
self.connect_mysql()
return True
def _type(self, record_type):
if record_type == dns.A:
return "A"
if record_type == dns.AAAA:
return "AAAA"
if record_type == dns.NS:
return "NS"
if record_type == dns.SOA:
return "SOA"
if record_type == dns.MX:
return "MX"
else:
return ""
def _dynamicResponseRequired(self, query):
if query.type == dns.NS or query.type == dns.SOA:
            self.cursor.execute("SELECT domain, nameservers, contact "
                                "FROM domains WHERE domain='%s'"
                                % query.name.name)
self.db.commit()
else:
            self.cursor.execute("SELECT hostname, ip, recordtype "
                                "FROM dnsrecords WHERE hostname='%s' "
                                "AND recordtype='%s'"
                                % (str(query.name.name),
                                   self._type(query.type)))
self.db.commit()
self.lookup_result = self.cursor.fetchall()
if self.cursor.rowcount > 0:
self.lookup_result = self.lookup_result[0]
return self.lookup_result
else:
return ""
def _Record_A(self, query):
answers = [dns.RRHeader(
name=query.name.name, type=query.type,
payload=dns.Record_A(address=self.lookup_result['ip'],
ttl=5), auth=True)]
return answers, [], []
def _Record_AAAA(self, query):
answers = [dns.RRHeader(
name=query.name.name, type=query.type,
payload=dns.Record_AAAA(address=self.lookup_result['ip'],
ttl=5), auth=True)]
return answers, [], []
def _Record_NS(self, query):
answers = []
for nameserver in self.lookup_result['nameservers'].split(','):
answers.append(dns.RRHeader(
name=query.name.name, type=query.type,
payload=dns.Record_NS(name=nameserver,
ttl=5), auth=True))
return answers, [], []
def _Record_MX(self, query):
answers = [dns.RRHeader(
name=query.name.name, type=query.type,
payload=dns.Record_MX(10, self.lookup_result['ip'],
ttl=5), auth=True)]
return answers, [], []
def _Record_SOA(self, query):
d = self.lookup_result['domain']
answers = [dns.RRHeader(
name=query.name.name, type=dns.SOA,
payload=dns.Record_SOA(mname=self.lookup_result['domain'],
                                   rname="hostmaster.%s" % d,
serial=int(time.time()),
refresh=3600,
ttl=5), auth=True)]
return answers, [], []
def _Record_Unknown(self, query):
answers = [dns.RRHeader(
name=query.name.name, type=query.type,
payload=dns.UnknownRecord(query.name.name, ttl=5), auth=True)]
return answers, [], []
def _Record_NXDOMAIN(self, query):
return [], [], []
def _FigureOutSOAForQuery(self, query):
tmp = query.name.name.split('.')
domain = tmp[-2]+"."+tmp[-1]
        self.cursor.execute("SELECT domain, nameservers, contact "
                            "FROM domains WHERE domain='%s'" % domain)
self.db.commit()
self.lookup_result = self.cursor.fetchall()
if self.cursor.rowcount > 0:
self.lookup_result = self.lookup_result[0]
else:
self.lookup_result = ""
def _doDynamicResponse(self, query):
if query.type == dns.SOA:
return self._Record_SOA(query)
elif query.type == dns.NS:
return self._Record_NS(query)
elif query.type == dns.A:
return self._Record_A(query)
elif query.type == dns.AAAA:
return self._Record_AAAA(query)
elif query.type == dns.MX:
return self._Record_MX(query)
elif query.type == dns.CNAME:
return self._Record_CNAME(query)
else:
return self._Record_Unknown(query)
def query(self, query, timeout=None):
self.ping_mysql()
try:
logging.info(query)
if len(self._dynamicResponseRequired(query)) > 0:
return defer.succeed(self._doDynamicResponse(query))
else:
if query.type == dns.MX:
return defer.succeed(self._Record_NXDOMAIN(query))
return defer.succeed(([], [], []))
except Exception, e:
logging.error(e)
return defer.fail(error.DomainError())
def _src(self, query):
return {'type': query.type,
'src': self.peer_address.host.replace("::ffff:", ""),
'name': query.name.name}
def main(self):
factory = DynDDServerFactory(
clients=[DynDDController(),
client.Resolver(resolv='/etc/resolv.conf')]
)
protocol = dns.DNSDatagramProtocol(controller=factory)
reactor.listenUDP(53, protocol, interface="::0")
reactor.listenTCP(53, factory)
reactor.run()
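# Smoke test sketch (added; the table row shown is hypothetical): with the
# daemon bound to port 53 and a row in `dnsrecords` such as
#   hostname='www.example.org', ip='192.0.2.10', recordtype='A'
# a lookup like
#   dig @127.0.0.1 www.example.org A
# should answer 192.0.2.10, while names absent from the database fall through
# to the chained client.Resolver built from /etc/resolv.conf.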
| gpl-3.0 | -6,304,562,743,559,989,000 | 35.596 | 78 | 0.512406 | false |